blob: 6c10fece124566699715cd9b3aaf9cf43a1f4bbe [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: index = bit position, value = name of the HW block
 * that raised the unrecoverable error (trailing spaces are part of the
 * original strings and are preserved).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR: index = bit position, value = name of the HW block
 * that raised the unrecoverable error.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000271 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000283 }
284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000290 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000291 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700292
Sathya Perla5a712c12013-07-23 15:24:59 +0530293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
dingtianhong61d23e92013-12-30 15:40:43 +0800296 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 status = -EPERM;
298 goto err;
299 }
300
Somnath Koture3a7ae22011-10-27 07:14:05 +0000301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530302 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 return 0;
304err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306 return status;
307}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
Ajit Khaparde61000862013-10-03 16:16:33 -0500440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530484 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500492}
493
Selvin Xavier005d5692011-05-16 07:36:35 +0000494static void populate_lancer_stats(struct be_adapter *adapter)
495{
Selvin Xavier005d5692011-05-16 07:36:35 +0000496 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000527 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000528 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000556void be_parse_stats(struct be_adapter *adapter)
557{
Ajit Khaparde61000862013-10-03 16:16:33 -0500558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559 struct be_rx_obj *rxo;
560 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000561 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000562
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000565 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else if (BE3_chip(adapter))
569 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 else
572 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573
Ajit Khaparde61000862013-10-03 16:16:33 -0500574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000578 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000579 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000580}
581
/* ndo_get_stats64: aggregate per-queue SW counters and per-adapter HW error
 * counters into @stats. Per-queue 64-bit counters are read under a
 * u64_stats seqcount retry loop so they are consistent on 32-bit hosts.
 * Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* SW drops: skb alloc failures and missing frags */
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666{
Sathya Perla3c8def92011-06-12 20:01:58 +0000667 struct be_tx_stats *stats = tx_stats(txo);
668
Sathya Perlaab1594e2011-07-25 19:10:15 +0000669 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000670 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671 stats->tx_bytes += skb->len;
672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500676/* Returns number of WRBs needed for the skb */
677static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500679 /* +1 for the header wrb */
680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681}
682
683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
684{
685 wrb->frag_pa_hi = upper_32_bits(addr);
686 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
687 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000688 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689}
690
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000691static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530692 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000693{
694 u8 vlan_prio;
695 u16 vlan_tag;
696
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100697 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000698 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
699 /* If vlan priority provided by OS is NOT in available bmap */
700 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
701 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
702 adapter->recommended_prio;
703
704 return vlan_tag;
705}
706
Sathya Perlac9c47142014-03-27 10:46:19 +0530707/* Used only for IP tunnel packets */
708static u16 skb_inner_ip_proto(struct sk_buff *skb)
709{
710 return (inner_ip_hdr(skb)->version == 4) ?
711 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
712}
713
714static u16 skb_ip_proto(struct sk_buff *skb)
715{
716 return (ip_hdr(skb)->version == 4) ?
717 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
718}
719
/* Fill the Tx header WRB: checksum/LSO/VLAN offload bits, WRB count and
 * total frame length, based on the skb's offload state.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* lso6 hint for IPv6 TSO; not set on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunnelled pkt: also request outer IP csum */
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
763
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000764static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530765 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000766{
767 dma_addr_t dma;
768
769 be_dws_le_to_cpu(wrb, sizeof(*wrb));
770
771 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000772 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000773 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774 dma_unmap_single(dev, dma, wrb->frag_len,
775 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000776 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000777 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000778 }
779}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780
/* Returns the number of WRBs used up by the skb, or 0 on DMA-map failure.
 * Posts a header WRB followed by one data WRB per fragment (linear head
 * first, then page frags) and records the skb for completion handling.
 * The doorbell is NOT rung here; see be_xmit_flush().
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;	/* saved for rollback and skb bookkeeping */

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	/* map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	/* remember the hdr position; be_xmit_flush() may patch it later */
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first data wrb is a single mapping */
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}
856
/* SW-insert the VLAN tag (and the outer QnQ tag, if configured) directly
 * into the packet data, for cases where HW VLAN tagging must be skipped.
 * May set *skip_hw_vlan so the caller programs the WRB accordingly.
 * Returns the (possibly reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* the tag now lives in the pkt data, not in the skb meta */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
901
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000902static bool be_ipv6_exthdr_check(struct sk_buff *skb)
903{
904 struct ethhdr *eh = (struct ethhdr *)skb->data;
905 u16 offset = ETH_HLEN;
906
907 if (eh->h_proto == htons(ETH_P_IPV6)) {
908 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
909
910 offset += sizeof(struct ipv6hdr);
911 if (ip6h->nexthdr != NEXTHDR_TCP &&
912 ip6h->nexthdr != NEXTHDR_UDP) {
913 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530914 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000915
916 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
917 if (ehdr->hdrlen == 0xff)
918 return true;
919 }
920 }
921 return false;
922}
923
924static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
925{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100926 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000927}
928
Sathya Perla748b5392014-05-09 13:29:13 +0530929static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000930{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000931 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000932}
933
/* Apply BEx/Lancer-specific Tx workarounds to @skb.
 * Returns the (possibly reallocated) skb, or NULL if the skb was dropped
 * or could not be reallocated.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim pad bytes so frame length matches the IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1001
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301002static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1003 struct sk_buff *skb,
1004 bool *skip_hw_vlan)
1005{
1006 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1007 * less may cause a transmit stall on that port. So the work-around is
1008 * to pad short packets (<= 32 bytes) to a 36-byte length.
1009 */
1010 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001011 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301012 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* Ring the Tx doorbell for all WRBs pending on @txo.
 * Ensures the last request raises a completion event, and on non-Lancer
 * chips appends a dummy WRB when the pending count is odd.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* account the dummy wrb in the last request's num_wrb field */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1047
/* ndo_start_xmit: apply workarounds, enqueue the skb's WRBs and ring the
 * doorbell unless xmit_more batching defers the flush. Drops the skb on
 * failure; always returns NETDEV_TX_OK.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* Stop the queue if another max-fragmented skb would not fit */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1084
1085static int be_change_mtu(struct net_device *netdev, int new_mtu)
1086{
1087 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301088 struct device *dev = &adapter->pdev->dev;
1089
1090 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1091 dev_info(dev, "MTU must be between %d and %d bytes\n",
1092 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093 return -EINVAL;
1094 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301095
1096 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301097 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098 netdev->mtu = new_mtu;
1099 return 0;
1100}
1101
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1158
/* ndo_vlan_rx_add_vid: record @vid in the SW table and push the table to
 * HW; rolls the SW state back if the HW update fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* already configured; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1182
Patrick McHardy80d5c362013-04-19 02:04:28 +00001183static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184{
1185 struct be_adapter *adapter = netdev_priv(netdev);
1186
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001187 /* Packets with VID 0 are always received by Lancer by default */
1188 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301189 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001190
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301191 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301192 adapter->vlans_added--;
1193
1194 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001195}
1196
Somnath kotur7ad09452014-03-03 14:24:43 +05301197static void be_clear_promisc(struct be_adapter *adapter)
1198{
1199 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301200 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301201
1202 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1203}
1204
/* ndo_set_rx_mode: program the HW RX filters (promisc, uc-list, mcast)
 * to match the netdev flags and address lists, falling back to (multicast)
 * promiscuous mode when HW limits are exceeded or programming fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-push the VLAN table that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* remove all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1271
/* Set the MAC address of a VF (.ndo_set_vf_mac-style signature).
 * Returns 0 on success, -EPERM when SR-IOV is disabled, -EINVAL for a bad
 * MAC or VF index, or a translated firmware error via be_cmd_status().
 *
 * On BEx chips the old PMAC entry must be deleted and the new one added;
 * on newer chips a single set-MAC firmware command is used.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace = delete old PMAC entry, then add new one.
		 * be_cmd_pmac_del()'s status is intentionally ignored.
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* cache the new active MAC only after the firmware accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1311
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001312static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301313 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001314{
1315 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001316 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001317
Sathya Perla11ac75e2011-12-13 00:58:50 +00001318 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319 return -EPERM;
1320
Sathya Perla11ac75e2011-12-13 00:58:50 +00001321 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001322 return -EINVAL;
1323
1324 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001325 vi->max_tx_rate = vf_cfg->tx_rate;
1326 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001327 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1328 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001329 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301330 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001331
1332 return 0;
1333}
1334
/* Configure transparent VLAN tagging for a VF via the host switch config.
 * @vlan/@qos == 0/0 resets (disables) transparent tagging.
 * Returns 0 on success, -EPERM/-EINVAL for permission/range errors, or a
 * translated firmware error via be_cmd_status().
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* pack priority into the tag; skip the fw call if unchanged */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* cache the programmed tag only after the firmware accepted it */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1369
/* Set a VF's maximum TX rate (Mbps). min_tx_rate is not supported and must
 * be 0. max_tx_rate == 0 clears the limit. A non-zero rate is validated
 * against the current link speed (must be 100..link_speed, and on Skyhawk a
 * whole-percent multiple of link_speed). Returns 0 on success or a negative
 * errno / translated firmware status.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* rate 0 == no limit; skip link-speed validation entirely */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	/* note: prints a second, generic message after any specific one */
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301431
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301432static int be_set_vf_link_state(struct net_device *netdev, int vf,
1433 int link_state)
1434{
1435 struct be_adapter *adapter = netdev_priv(netdev);
1436 int status;
1437
1438 if (!sriov_enabled(adapter))
1439 return -EPERM;
1440
1441 if (vf >= adapter->num_vfs)
1442 return -EINVAL;
1443
1444 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301445 if (status) {
1446 dev_err(&adapter->pdev->dev,
1447 "Link state change on VF %d failed: %#x\n", vf, status);
1448 return be_cmd_status(status);
1449 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301450
Kalesh APabccf232014-07-17 16:20:24 +05301451 adapter->vf_cfg[vf].plink_tracking = link_state;
1452
1453 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301454}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001455
Sathya Perla2632baf2013-10-01 16:00:00 +05301456static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1457 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458{
Sathya Perla2632baf2013-10-01 16:00:00 +05301459 aic->rx_pkts_prev = rx_pkts;
1460 aic->tx_reqs_prev = tx_pkts;
1461 aic->jiffies = now;
1462}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001463
/* Adaptive interrupt coalescing: recompute the event-queue delay (EQD) for
 * every event queue from the observed RX+TX packet rate since the last
 * sample, clamp it to the per-queue [min_eqd, max_eqd] range, and issue a
 * single firmware command for all queues whose EQD changed.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: drop the sample baseline and force the
			 * user-configured (ethtool) static delay
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* read the 64-bit counters consistently (seqcount retry) */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* combined packets-per-second over the sampling interval */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* very low rate: no coalescing delay */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* batch this queue into the fw request only if EQD changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1529
Sathya Perla3abcded2010-10-03 22:12:27 -07001530static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301531 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001532{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001533 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001534
Sathya Perlaab1594e2011-07-25 19:10:15 +00001535 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001536 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001537 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001538 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001539 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001540 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001541 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001542 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001543 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544}
1545
Sathya Perla2e588f82011-03-11 02:49:26 +00001546static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001547{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001548 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301549 * Also ignore ipcksm for ipv6 pkts
1550 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001551 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301552 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001553}
1554
/* Consume one RX frag from the tail of the RX queue and return its page
 * info. For the last frag mapped from a (possibly compound) page the whole
 * DMA mapping is torn down; for earlier frags only a CPU cache sync of that
 * frag is needed. Advances the queue tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* last frag of this page: unmap the full DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* intermediate frag: just make this frag CPU-visible */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1580
1581/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001582static void be_rx_compl_discard(struct be_rx_obj *rxo,
1583 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001586 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001588 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301589 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001590 put_page(page_info->page);
1591 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592 }
1593}
1594
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first frag is special: tiny packets (<= BE_HDR_LEN) are copied
 * entirely into the skb's linear area; larger packets get only the
 * Ethernet header copied, with the payload attached as a page frag.
 * Remaining frags are appended, coalescing consecutive frags that come
 * from the same physical page into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the Ethernet header; the rest stays in the page */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as slot j: drop the extra page reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1669
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the posted RX frags, sets checksum/
 * hash/VLAN metadata from the completion, and hands it to the stack via
 * netif_receive_skb(). On skb allocation failure the frags are discarded
 * and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* trust the HW checksum only when RXCSUM is on and the verdict
	 * applies (error-free TCP/UDP; see csum_passed())
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* for encapsulated pkts the HW validated the inner csum too */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1705
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Uses napi_get_frags() to obtain a frag-only skb, attaches the posted RX
 * page frags (coalescing frags from the same physical page), fills in the
 * metadata, and submits the skb to GRO via napi_gro_frags(). Falls back to
 * discarding the frags if no skb is available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;	/* i: frag index in compl; j: skb frag slot (starts -1) */

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as slot j: drop the extra page reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when the HW csum verdict is trusted */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* for encapsulated pkts the HW validated the inner csum too */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1763
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001764static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1765 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001766{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301767 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1768 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1769 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1770 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1771 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1772 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1773 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1774 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1775 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1776 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1777 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001778 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301779 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1780 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001781 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301782 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301783 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301784 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001785}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001786
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001787static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1788 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001789{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301790 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1791 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1792 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1793 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1794 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1795 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1796 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1797 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1798 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1799 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1800 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001801 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301802 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1803 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001804 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301805 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1806 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001807}
1808
/* Fetch and parse the next RX completion from the tail of the compl queue.
 * Returns NULL when no new completion is posted (valid bit clear).
 * The valid-bit check must precede reading the rest of the descriptor;
 * the rmb() orders those reads after it. After parsing, VLAN fixups are
 * applied and the descriptor's valid bit is cleared for reuse.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* ensure the descriptor body is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 csum verdict is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* hide the pvid tag from the host unless explicitly added */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1853
Eric Dumazet1829b082011-03-01 05:48:12 +00001854static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001857
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001859 gfp |= __GFP_COMP;
1860 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861}
1862
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.  Posts at most @frags_needed fragments (or fewer
 * if the RXQ ring fills up or allocation/DMA-mapping fails), then notifies
 * HW of the posted entries in chunks of up to 256.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Stop early (!page_info->page) when the next ring slot is still
	 * occupied, i.e. the RXQ is full.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page"; it is carved into
			 * rx_frag_size chunks below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current page: take an extra ref
			 * so each posted frag owns a page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			/* Last frag of this page: remember the page-level DMA
			 * address so the whole page can be unmapped later.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* HW doorbell takes at most 256 postings per notify */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1945
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * pending.  The entry is converted to CPU endianness in place and its
 * valid bit cleared so it is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the compl body only after the valid bit has been observed */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1961
/* Walk the TX queue from its tail up to and including @last_index,
 * unmapping each wrb's DMA buffer and freeing the skbs whose wrbs have
 * completed.  Returns the number of wrbs (hdr + frags) processed so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL sent_skbs[] slot marks the hdr wrb of a new
		 * request; everything since the previous hdr belongs to the
		 * previous skb, which can now be freed.
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);  /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final (or only) request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
1995
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001996/* Return the number of events in the event queue */
1997static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001998{
1999 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002000 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002001
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002002 do {
2003 eqe = queue_tail_node(&eqo->q);
2004 if (eqe->evt == 0)
2005 break;
2006
2007 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002008 eqe->evt = 0;
2009 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002010 queue_tail_inc(&eqo->q);
2011 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002012
2013 return num;
2014}
2015
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002016/* Leaves the EQ is disarmed state */
2017static void be_eq_clean(struct be_eq_obj *eqo)
2018{
2019 int num = events_get(eqo);
2020
2021 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2022}
2023
/* Drain an RX completion queue and free all posted-but-unused RX
 * buffers; used during RX queue teardown.  Leaves the CQ unarmed and
 * resets the RXQ head/tail indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms without the flush compl */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2073
/* Reclaim all TX completions across all TX queues before teardown.
 * Polls each TX-CQ until HW has been silent for ~10ms, then uses the
 * compl-processing logic to free wrbs that were enqueued but never
 * notified to HW, and resets the TXQ indices for those entries.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart silence timer */
				timeo = 0;
			}
			/* Only un-notified wrbs remain for this txq */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2138
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002139static void be_evt_queues_destroy(struct be_adapter *adapter)
2140{
2141 struct be_eq_obj *eqo;
2142 int i;
2143
2144 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002145 if (eqo->q.created) {
2146 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002147 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302148 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302149 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002150 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002151 be_queue_free(adapter, &eqo->q);
2152 }
2153}
2154
/* Allocate and create the event queues, registering a NAPI context for
 * each.  The EQ count is capped by available IRQs and the configured
 * queue count.  Returns 0 or a negative error; on failure the caller is
 * responsible for cleaning up partially-created queues.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing starts enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2187
Sathya Perla5fb379e2009-06-18 00:02:59 +00002188static void be_mcc_queues_destroy(struct be_adapter *adapter)
2189{
2190 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002191
Sathya Perla8788fdc2009-07-27 22:52:03 +00002192 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002193 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002194 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002195 be_queue_free(adapter, q);
2196
Sathya Perla8788fdc2009-07-27 22:52:03 +00002197 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002198 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002199 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002200 be_queue_free(adapter, q);
2201}
2202
/* Create the MCC queue and its completion queue, rolling back any
 * partially-created queues on failure.  Returns 0 on success, -1 on
 * error.  Must be called only after TX qs are created as MCC shares
 * the TX EQ.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2235
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002236static void be_tx_queues_destroy(struct be_adapter *adapter)
2237{
2238 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002239 struct be_tx_obj *txo;
2240 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241
Sathya Perla3c8def92011-06-12 20:01:58 +00002242 for_all_tx_queues(adapter, txo, i) {
2243 q = &txo->q;
2244 if (q->created)
2245 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2246 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247
Sathya Perla3c8def92011-06-12 20:01:58 +00002248 q = &txo->cq;
2249 if (q->created)
2250 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2251 be_queue_free(adapter, q);
2252 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002253}
2254
/* Allocate and create the TX queues and their completion queues.  The
 * TXQ count is capped by the EQ count and HW limits; when there are
 * fewer EQs than TXQs, multiple TXQs share an EQ.  Returns 0 or a
 * negative error (partially-created queues are cleaned up by the
 * caller's error path).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2295
2296static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002297{
2298 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002299 struct be_rx_obj *rxo;
2300 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002301
Sathya Perla3abcded2010-10-03 22:12:27 -07002302 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002303 q = &rxo->cq;
2304 if (q->created)
2305 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2306 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308}
2309
/* Allocate and create the RX completion queues: one RSS ring per EQ,
 * plus one default RXQ for non-IP traffic when RSS is in use.  Returns
 * 0 or a negative error.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Distribute the RX CQs across the available EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2346
/* INTx interrupt handler: count pending events, schedule NAPI, and
 * notify the EQ.  Repeated spurious interrupts return IRQ_NONE so the
 * kernel's spurious-IRQ detection can kick in.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2378
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002379static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002381 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002382
Sathya Perla0b545a62012-11-23 00:27:18 +00002383 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2384 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002385 return IRQ_HANDLED;
2386}
2387
Sathya Perla2e588f82011-03-11 02:49:26 +00002388static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002389{
Somnath Koture38b1702013-05-29 22:55:56 +00002390 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002391}
2392
/* NAPI RX processing: consume up to @budget RX completions on @rxo,
 * handing good pkts to the stack (via GRO when eligible and not
 * busy-polling) and discarding flush/partial/mis-filtered compls.
 * Replenishes RX buffers when the queue runs low.  Returns the number
 * of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2452
Kalesh AP512bb8a2014-09-02 09:56:49 +05302453static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2454{
2455 switch (status) {
2456 case BE_TX_COMP_HDR_PARSE_ERR:
2457 tx_stats(txo)->tx_hdr_parse_err++;
2458 break;
2459 case BE_TX_COMP_NDMA_ERR:
2460 tx_stats(txo)->tx_dma_err++;
2461 break;
2462 case BE_TX_COMP_ACL_ERR:
2463 tx_stats(txo)->tx_spoof_check_err++;
2464 break;
2465 }
2466}
2467
2468static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2469{
2470 switch (status) {
2471 case LANCER_TX_COMP_LSO_ERR:
2472 tx_stats(txo)->tx_tso_err++;
2473 break;
2474 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2475 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2476 tx_stats(txo)->tx_spoof_check_err++;
2477 break;
2478 case LANCER_TX_COMP_QINQ_ERR:
2479 tx_stats(txo)->tx_qinq_err++;
2480 break;
2481 case LANCER_TX_COMP_PARITY_ERR:
2482 tx_stats(txo)->tx_internal_parity_err++;
2483 break;
2484 case LANCER_TX_COMP_DMA_ERR:
2485 tx_stats(txo)->tx_dma_err++;
2486 break;
2487 }
2488}
2489
/* NAPI TX processing: reap all available TX completions on @txo, free
 * the completed wrbs/skbs, record any HW-reported TX errors, and wake
 * the netdev sub-queue @idx if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* Non-zero status indicates a TX error; account it in the
		 * chip-specific stats bucket.
		 */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002528
/* NAPI poll handler for one EQ: processes completions of all tx-queues
 * bound to the EQ, then the rx-queues (if not currently owned by a
 * busy-poller), plus MCC completions when this is the MCC EQ.
 * Returns the RX work done, capped at @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* Tally the EQ entries consumed; they are acked via be_eq_notify
	 * at the bottom of this function.
	 */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RX rings right now; claim the full
		 * budget so NAPI keeps getting rescheduled.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ and ack the consumed events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2568
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency socket busy-poll handler: polls each RX queue on this EQ
 * once with a small (4-completion) budget, stopping at the first queue
 * that yields packets. Returns LL_FLUSH_BUSY when NAPI currently owns
 * the rings, otherwise the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2590
/* Poll the adapter for fatal errors. Lancer chips report errors through
 * the SLIPORT status/error registers; BEx/Skyhawk chips through the
 * PCICFG UE (unrecoverable error) status words. When an error is found
 * the carrier is turned off; hw_error is set only where the code below
 * explicitly does so.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Drop the UE bits that the mask registers tell us to ignore */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log the name of every set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2666
Sathya Perla8d56ff12009-11-22 22:02:26 +00002667static void be_msix_disable(struct be_adapter *adapter)
2668{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002669 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002670 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002671 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302672 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002673 }
2674}
2675
/* Enable MSI-x with enough vectors for the configured NIC queues (and
 * RoCE when supported). Returns 0 on success. On failure a PF returns 0
 * so probe can fall back to INTx, while a VF propagates the error
 * (INTx is not supported on VFs).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give half of the granted vectors to RoCE, the rest to the NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2719
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002720static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302721 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002722{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302723 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002724}
2725
/* Request one MSI-x IRQ per event queue. On failure, free the IRQs that
 * were already requested and disable MSI-x so the caller can fall back
 * to INTx. Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* IRQ name shown in /proc/interrupts, e.g. "eth0-q0" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the IRQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2749
/* Register interrupts: prefer MSI-x; a PF may fall back to a shared
 * INTx line, a VF may not. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2777
/* Undo be_irq_register(): free the shared INTx line or the per-EQ
 * MSI-x vectors. No-op when no ISR was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		/* INTx mode registered only EQ0 as the cookie */
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2800
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002801static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002802{
2803 struct be_queue_info *q;
2804 struct be_rx_obj *rxo;
2805 int i;
2806
2807 for_all_rx_queues(adapter, rxo, i) {
2808 q = &rxo->q;
2809 if (q->created) {
2810 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002811 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002812 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002813 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002814 }
2815}
2816
/* ndo_stop: quiesce NAPI/busy-poll, disable async MCC events, drain TX,
 * destroy the RX rings, delete extra unicast MACs, clean the EQs and
 * release the IRQs. Ordering here mirrors (reverses) be_open().
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the additional unicast MACs; index 0 is the primary MAC
	 * and is kept.
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no IRQ handler is still running before cleaning EQs */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2866
/* Allocate and create all RX rings, program the RSS indirection table,
 * flags and hash key (when more than the default RXQ exists), and post
 * the initial batch of receive buffers. Returns 0 or a FW/alloc error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queues' rss_ids round-robin.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS flags are set only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Program a random RSS hash key */
	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key so ethtool can report it */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
2932
/* ndo_open: create the RX rings, register IRQs, arm all CQs and EQs,
 * enable NAPI/busy-poll and async MCC events, publish link state and
 * start the TX queues. On any failure, be_close() unwinds.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VxLAN ports (Skyhawk only) */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2982
/* Enable or disable Wake-on-LAN (magic packet): programs the magic
 * pattern in FW and toggles PCI wake for D3hot/D3cold accordingly.
 * Returns 0 on success or a negative/FW error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* An all-zero MAC is passed to FW on the disable path */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		/* NOTE(review): PCI wake is enabled even when the FW cmd
		 * above fails (status is still returned) — confirm intended.
		 */
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3022
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; later chips set the MAC
		 * directly on the VF's i/face.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
3058
/* Read back each VF's currently-active MAC from FW and cache it in the
 * VF's vf_cfg. Stops and returns the error on the first failing query.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3075
/* Tear down SR-IOV state: delete each VF's MAC and i/face and disable
 * SR-IOV. When VFs are still assigned to VMs only the host-side vf_cfg
 * bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx used a pmac entry; later chips clear the MAC on
		 * the i/face directly.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3104
/* Destroy all HW queues in this fixed order: MCC, RX CQs, TX queues,
 * and the event queues last.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3112
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303113static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003114{
Sathya Perla191eb752012-02-23 18:50:13 +00003115 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3116 cancel_delayed_work_sync(&adapter->work);
3117 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3118 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303119}
3120
Somnath Koturb05004a2013-12-05 12:08:16 +05303121static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303122{
3123 int i;
3124
Somnath Koturb05004a2013-12-05 12:08:16 +05303125 if (adapter->pmac_id) {
3126 for (i = 0; i < (adapter->uc_macs + 1); i++)
3127 be_cmd_pmac_del(adapter, adapter->if_handle,
3128 adapter->pmac_id[i], 0);
3129 adapter->uc_macs = 0;
3130
3131 kfree(adapter->pmac_id);
3132 adapter->pmac_id = NULL;
3133 }
3134}
3135
#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN offload state: convert the i/face back to normal mode,
 * clear the FW's VxLAN port and strip the tunnel offload features from
 * the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303156
/* Undo be_setup(): stop the worker, clear VFs, rebalance SR-IOV
 * resources in FW, drop VxLAN offloads and MACs, destroy the i/face,
 * all queues and MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3185
Kalesh AP0700d812015-01-20 03:51:43 -05003186static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3187 u32 cap_flags, u32 vf)
3188{
3189 u32 en_flags;
3190 int status;
3191
3192 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3193 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3194 BE_IF_FLAGS_RSS;
3195
3196 en_flags &= cap_flags;
3197
3198 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3199 if_handle, vf);
3200
3201 return status;
3202}
3203
Sathya Perla4c876612013-02-03 20:30:11 +00003204static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003205{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303206 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003207 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003208 u32 cap_flags, vf;
3209 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003210
Kalesh AP0700d812015-01-20 03:51:43 -05003211 /* If a FW profile exists, then cap_flags are updated */
Sathya Perla4c876612013-02-03 20:30:11 +00003212 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3213 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003214
Sathya Perla4c876612013-02-03 20:30:11 +00003215 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303216 if (!BE3_chip(adapter)) {
3217 status = be_cmd_get_profile_config(adapter, &res,
3218 vf + 1);
3219 if (!status)
3220 cap_flags = res.if_cap_flags;
3221 }
Sathya Perla4c876612013-02-03 20:30:11 +00003222
Kalesh AP0700d812015-01-20 03:51:43 -05003223 status = be_if_create(adapter, &vf_cfg->if_handle,
3224 cap_flags, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003225 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003226 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003227 }
Kalesh AP0700d812015-01-20 03:51:43 -05003228
3229 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003230}
3231
Sathya Perla39f1d942012-05-08 19:41:24 +00003232static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003233{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003234 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003235 int vf;
3236
Sathya Perla39f1d942012-05-08 19:41:24 +00003237 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3238 GFP_KERNEL);
3239 if (!adapter->vf_cfg)
3240 return -ENOMEM;
3241
Sathya Perla11ac75e2011-12-13 00:58:50 +00003242 for_all_vfs(adapter, vf_cfg, vf) {
3243 vf_cfg->if_handle = -1;
3244 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003245 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003246 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003247}
3248
/* Bring up SR-IOV VFs.  If VFs were already enabled before the driver
 * loaded (old_vfs != 0), the existing FW interface handles and MAC
 * addresses are queried and reused; otherwise per-VF interfaces and MACs
 * are created from scratch and SR-IOV is enabled in PCI config space.
 * On any failure, partially created VF state is torn down via
 * be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: query their if-handles and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh setup: create interfaces and program MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		/* Last step: flip SR-IOV on in PCI config space */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3323
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303324/* Converting function_mode bits on BE3 to SH mc_type enums */
3325
3326static u8 be_convert_mc_type(u32 function_mode)
3327{
Suresh Reddy66064db2014-06-23 16:41:29 +05303328 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303329 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303330 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303331 return FLEX10;
3332 else if (function_mode & VNIC_MODE)
3333 return vNIC2;
3334 else if (function_mode & UMC_ENABLED)
3335 return UMC;
3336 else
3337 return MC_NONE;
3338}
3339
/* On BE2/BE3 FW does not suggest the supported limits, so derive them here
 * from the chip revision, multi-channel mode and SR-IOV state.  The caller
 * passes *res zero-initialized; fields not assigned below (e.g. max_rss_qs
 * for VFs or non-RSS functions) therefore stay 0.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues are available only on an RSS-capable, non-SRIOV PF */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				  BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				  BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3407
Sathya Perla30128032011-11-10 19:17:57 +00003408static void be_setup_init(struct be_adapter *adapter)
3409{
3410 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003411 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003412 adapter->if_handle = -1;
3413 adapter->be3_native = false;
3414 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003415 if (be_physfn(adapter))
3416 adapter->cmd_privileges = MAX_PRIVILEGES;
3417 else
3418 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003419}
3420
/* Query the PF-pool SR-IOV limits from FW (stashed in adapter->pool_res)
 * and derive adapter->num_vfs from the num_vfs module parameter, clamped
 * to what the HW supports.  If VFs are already enabled, their count wins
 * over the module parameter.  Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI capability's TotalVFs value */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3465
/* Populate adapter->res with the per-function resource limits: computed
 * locally for BE2/BE3 chips, queried from FW for newer chips.  The limits
 * are logged for debugging.  Returns 0 or a FW-command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3502
Sathya Perlad3d18312014-08-01 17:47:30 +05303503static void be_sriov_config(struct be_adapter *adapter)
3504{
3505 struct device *dev = &adapter->pdev->dev;
3506 int status;
3507
3508 status = be_get_sriov_config(adapter);
3509 if (status) {
3510 dev_err(dev, "Failed to query SR-IOV configuration\n");
3511 dev_err(dev, "SR-IOV cannot be enabled\n");
3512 return;
3513 }
3514
3515 /* When the HW is in SRIOV capable configuration, the PF-pool
3516 * resources are equally distributed across the max-number of
3517 * VFs. The user may request only a subset of the max-vfs to be
3518 * enabled. Based on num_vfs, redistribute the resources across
3519 * num_vfs so that each VF will have access to more number of
3520 * resources. This facility is not available in BE3 FW.
3521 * Also, this is done by FW in Lancer chip.
3522 */
3523 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3524 status = be_cmd_set_sriov_config(adapter,
3525 adapter->pool_res,
3526 adapter->num_vfs);
3527 if (status)
3528 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3529 }
3530}
3531
/* Read the adapter's FW configuration and resource limits, configure
 * SR-IOV on capable chips, and allocate the unicast pmac_id table sized
 * by the discovered max-uc-mac limit.  Returns 0, a FW-command error, or
 * -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* SR-IOV config is not applicable on BE2 or for VFs */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3565
/* Ensure the netdev has a valid MAC address and program it as the first
 * unicast filter (pmac_id[0]) on the FW interface.  Returns 0 or a
 * FW-command error from the permanent-MAC query.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		/* First-time setup: read the permanent MAC from FW */
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3589
/* Start the periodic housekeeping worker (fires after 1000ms) and record
 * that it is scheduled so be_cancel_worker() knows to cancel it later.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3595
Sathya Perla77071332013-08-27 16:57:34 +05303596static int be_setup_queues(struct be_adapter *adapter)
3597{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303598 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303599 int status;
3600
3601 status = be_evt_queues_create(adapter);
3602 if (status)
3603 goto err;
3604
3605 status = be_tx_qs_create(adapter);
3606 if (status)
3607 goto err;
3608
3609 status = be_rx_cqs_create(adapter);
3610 if (status)
3611 goto err;
3612
3613 status = be_mcc_queues_create(adapter);
3614 if (status)
3615 goto err;
3616
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303617 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3618 if (status)
3619 goto err;
3620
3621 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3622 if (status)
3623 goto err;
3624
Sathya Perla77071332013-08-27 16:57:34 +05303625 return 0;
3626err:
3627 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3628 return status;
3629}
3630
/* Tear down and re-create all queues (and, when possible, the MSI-x
 * vectors) while keeping the FW interface intact.  The netdev is closed
 * around the operation if it was running and re-opened at the end.
 * Returns 0 or the first error encountered.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3666
/* Full adapter initialization: discover FW config and resource limits,
 * enable MSI-x, create the FW interface and all queues, program the MAC,
 * configure VLAN/RX-mode/flow-control, bring up VFs if requested, and
 * start the housekeeping worker.  On failure everything is undone via
 * be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Very old BE2 FW (< 4.0) has known interrupt problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program any VLANs that were configured before a re-setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* If setting flow-control fails, read back what FW is using */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3746
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: notify every event queue and schedule its NAPI
 * context so pending completions are processed without a real interrupt.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eq;
	int idx;

	for_all_evt_queues(adapter, eq, idx) {
		be_eq_notify(eq->adapter, eq->q.id, false, true, 0);
		napi_schedule(&eq->napi);
	}
}
#endif
3760
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303761static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003762
Sathya Perla306f1342011-08-02 19:57:45 +00003763static bool phy_flashing_required(struct be_adapter *adapter)
3764{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05003765 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003766 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003767}
3768
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003769static bool is_comp_in_ufi(struct be_adapter *adapter,
3770 struct flash_section_info *fsec, int type)
3771{
3772 int i = 0, img_type = 0;
3773 struct flash_section_info_g2 *fsec_g2 = NULL;
3774
Sathya Perlaca34fe32012-11-06 17:48:56 +00003775 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003776 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3777
3778 for (i = 0; i < MAX_FLASH_COMP; i++) {
3779 if (fsec_g2)
3780 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3781 else
3782 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3783
3784 if (img_type == type)
3785 return true;
3786 }
3787 return false;
3788
3789}
3790
Jingoo Han4188e7d2013-08-05 18:02:02 +09003791static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303792 int header_size,
3793 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003794{
3795 struct flash_section_info *fsec = NULL;
3796 const u8 *p = fw->data;
3797
3798 p += header_size;
3799 while (p < (fw->data + fw->size)) {
3800 fsec = (struct flash_section_info *)p;
3801 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3802 return fsec;
3803 p += 32;
3804 }
3805 return NULL;
3806}
3807
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303808static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3809 u32 img_offset, u32 img_size, int hdr_size,
3810 u16 img_optype, bool *crc_match)
3811{
3812 u32 crc_offset;
3813 int status;
3814 u8 crc[4];
3815
3816 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3817 if (status)
3818 return status;
3819
3820 crc_offset = hdr_size + img_offset + img_size - 4;
3821
3822 /* Skip flashing, if crc of flashed region matches */
3823 if (!memcmp(crc, p + crc_offset, 4))
3824 *crc_match = true;
3825 else
3826 *crc_match = false;
3827
3828 return status;
3829}
3830
/* Write one flash component image to the adapter in 32KB chunks through
 * the DMA buffer in flash_cmd.  Intermediate chunks are sent with a SAVE
 * opcode; the final chunk uses a FLASH opcode that commits the write
 * (PHY FW has its own SAVE/FLASH opcode pair).  An ILLEGAL_REQUEST status
 * on a PHY-FW write is treated as "not supported on this board" and ends
 * the loop without reporting an error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		/* FW accepts at most 32KB per write request */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3868
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003869/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003870static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303871 const struct firmware *fw,
3872 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003873{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003874 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303875 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003876 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303877 int status, i, filehdr_size, num_comp;
3878 const struct flash_comp *pflashcomp;
3879 bool crc_match;
3880 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003881
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003882 struct flash_comp gen3_flash_types[] = {
3883 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3884 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3885 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3886 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3887 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3888 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3889 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3890 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3891 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3892 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3893 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3894 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3895 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3896 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3897 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3898 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3899 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3900 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3901 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3902 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003903 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003904
3905 struct flash_comp gen2_flash_types[] = {
3906 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3907 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3908 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3909 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3910 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3911 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3912 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3913 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3914 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3915 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3916 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3917 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3918 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3919 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3920 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3921 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003922 };
3923
Sathya Perlaca34fe32012-11-06 17:48:56 +00003924 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003925 pflashcomp = gen3_flash_types;
3926 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003927 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003928 } else {
3929 pflashcomp = gen2_flash_types;
3930 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003931 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003932 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003933
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003934 /* Get flash section info*/
3935 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3936 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303937 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003938 return -1;
3939 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003940 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003941 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003942 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003943
3944 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3945 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3946 continue;
3947
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003948 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3949 !phy_flashing_required(adapter))
3950 continue;
3951
3952 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303953 status = be_check_flash_crc(adapter, fw->data,
3954 pflashcomp[i].offset,
3955 pflashcomp[i].size,
3956 filehdr_size +
3957 img_hdrs_size,
3958 OPTYPE_REDBOOT, &crc_match);
3959 if (status) {
3960 dev_err(dev,
3961 "Could not get CRC for 0x%x region\n",
3962 pflashcomp[i].optype);
3963 continue;
3964 }
3965
3966 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003967 continue;
3968 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003969
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303970 p = fw->data + filehdr_size + pflashcomp[i].offset +
3971 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003972 if (p + pflashcomp[i].size > fw->data + fw->size)
3973 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003974
3975 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303976 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003977 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303978 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003979 pflashcomp[i].img_type);
3980 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003981 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003982 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003983 return 0;
3984}
3985
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303986static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3987{
3988 u32 img_type = le32_to_cpu(fsec_entry.type);
3989 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3990
3991 if (img_optype != 0xFFFF)
3992 return img_optype;
3993
3994 switch (img_type) {
3995 case IMAGE_FIRMWARE_iSCSI:
3996 img_optype = OPTYPE_ISCSI_ACTIVE;
3997 break;
3998 case IMAGE_BOOT_CODE:
3999 img_optype = OPTYPE_REDBOOT;
4000 break;
4001 case IMAGE_OPTION_ROM_ISCSI:
4002 img_optype = OPTYPE_BIOS;
4003 break;
4004 case IMAGE_OPTION_ROM_PXE:
4005 img_optype = OPTYPE_PXE_BIOS;
4006 break;
4007 case IMAGE_OPTION_ROM_FCoE:
4008 img_optype = OPTYPE_FCOE_BIOS;
4009 break;
4010 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4011 img_optype = OPTYPE_ISCSI_BACKUP;
4012 break;
4013 case IMAGE_NCSI:
4014 img_optype = OPTYPE_NCSI_FW;
4015 break;
4016 case IMAGE_FLASHISM_JUMPVECTOR:
4017 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4018 break;
4019 case IMAGE_FIRMWARE_PHY:
4020 img_optype = OPTYPE_SH_PHY_FW;
4021 break;
4022 case IMAGE_REDBOOT_DIR:
4023 img_optype = OPTYPE_REDBOOT_DIR;
4024 break;
4025 case IMAGE_REDBOOT_CONFIG:
4026 img_optype = OPTYPE_REDBOOT_CONFIG;
4027 break;
4028 case IMAGE_UFI_DIR:
4029 img_optype = OPTYPE_UFI_DIR;
4030 break;
4031 default:
4032 break;
4033 }
4034
4035 return img_optype;
4036}
4037
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004038static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304039 const struct firmware *fw,
4040 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004041{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004042 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304043 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004044 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304045 u32 img_offset, img_size, img_type;
4046 int status, i, filehdr_size;
4047 bool crc_match, old_fw_img;
4048 u16 img_optype;
4049 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004050
4051 filehdr_size = sizeof(struct flash_file_hdr_g3);
4052 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4053 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304054 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304055 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004056 }
4057
4058 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4059 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4060 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304061 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4062 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4063 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004064
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304065 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004066 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304067 /* Don't bother verifying CRC if an old FW image is being
4068 * flashed
4069 */
4070 if (old_fw_img)
4071 goto flash;
4072
4073 status = be_check_flash_crc(adapter, fw->data, img_offset,
4074 img_size, filehdr_size +
4075 img_hdrs_size, img_optype,
4076 &crc_match);
4077 /* The current FW image on the card does not recognize the new
4078 * FLASH op_type. The FW download is partially complete.
4079 * Reboot the server now to enable FW image to recognize the
4080 * new FLASH op_type. To complete the remaining process,
4081 * download the same FW again after the reboot.
4082 */
Kalesh AP4c600052014-05-30 19:06:26 +05304083 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4084 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304085 dev_err(dev, "Flash incomplete. Reset the server\n");
4086 dev_err(dev, "Download FW image again after reset\n");
4087 return -EAGAIN;
4088 } else if (status) {
4089 dev_err(dev, "Could not get CRC for 0x%x region\n",
4090 img_optype);
4091 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004092 }
4093
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304094 if (crc_match)
4095 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004096
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304097flash:
4098 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004099 if (p + img_size > fw->data + fw->size)
4100 return -1;
4101
4102 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304103 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4104 * UFI_DIR region
4105 */
Kalesh AP4c600052014-05-30 19:06:26 +05304106 if (old_fw_img &&
4107 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4108 (img_optype == OPTYPE_UFI_DIR &&
4109 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304110 continue;
4111 } else if (status) {
4112 dev_err(dev, "Flashing section type 0x%x failed\n",
4113 img_type);
4114 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004115 }
4116 }
4117 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004118}
4119
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004120static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304121 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004122{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004123#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4124#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304125 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004126 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004127 const u8 *data_ptr = NULL;
4128 u8 *dest_image_ptr = NULL;
4129 size_t image_size = 0;
4130 u32 chunk_size = 0;
4131 u32 data_written = 0;
4132 u32 offset = 0;
4133 int status = 0;
4134 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004135 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004136
4137 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304138 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304139 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004140 }
4141
4142 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4143 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304144 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004145 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304146 if (!flash_cmd.va)
4147 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004148
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004149 dest_image_ptr = flash_cmd.va +
4150 sizeof(struct lancer_cmd_req_write_object);
4151 image_size = fw->size;
4152 data_ptr = fw->data;
4153
4154 while (image_size) {
4155 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4156
4157 /* Copy the image chunk content. */
4158 memcpy(dest_image_ptr, data_ptr, chunk_size);
4159
4160 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004161 chunk_size, offset,
4162 LANCER_FW_DOWNLOAD_LOCATION,
4163 &data_written, &change_status,
4164 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004165 if (status)
4166 break;
4167
4168 offset += data_written;
4169 data_ptr += data_written;
4170 image_size -= data_written;
4171 }
4172
4173 if (!status) {
4174 /* Commit the FW written */
4175 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004176 0, offset,
4177 LANCER_FW_DOWNLOAD_LOCATION,
4178 &data_written, &change_status,
4179 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004180 }
4181
Kalesh APbb864e02014-09-02 09:56:51 +05304182 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004183 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304184 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304185 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004186 }
4187
Kalesh APbb864e02014-09-02 09:56:51 +05304188 dev_info(dev, "Firmware flashed successfully\n");
4189
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004190 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304191 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004192 status = lancer_physdev_ctrl(adapter,
4193 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004194 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304195 dev_err(dev, "Adapter busy, could not reset FW\n");
4196 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004197 }
4198 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304199 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004200 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304201
4202 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004203}
4204
Sathya Perlaca34fe32012-11-06 17:48:56 +00004205#define UFI_TYPE2 2
4206#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004207#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004208#define UFI_TYPE4 4
4209static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004210 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004211{
Kalesh APddf11692014-07-17 16:20:28 +05304212 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004213 goto be_get_ufi_exit;
4214
Sathya Perlaca34fe32012-11-06 17:48:56 +00004215 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4216 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004217 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4218 if (fhdr->asic_type_rev == 0x10)
4219 return UFI_TYPE3R;
4220 else
4221 return UFI_TYPE3;
4222 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004223 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004224
4225be_get_ufi_exit:
4226 dev_err(&adapter->pdev->dev,
4227 "UFI and Interface are not compatible for flashing\n");
4228 return -1;
4229}
4230
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004231static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4232{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004233 struct flash_file_hdr_g3 *fhdr3;
4234 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004235 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004236 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004237 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004238
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004239 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004240 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4241 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00004242 if (!flash_cmd.va) {
4243 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004244 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004245 }
4246
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004247 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004248 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004249
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004250 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004251
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004252 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4253 for (i = 0; i < num_imgs; i++) {
4254 img_hdr_ptr = (struct image_hdr *)(fw->data +
4255 (sizeof(struct flash_file_hdr_g3) +
4256 i * sizeof(struct image_hdr)));
4257 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004258 switch (ufi_type) {
4259 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004260 status = be_flash_skyhawk(adapter, fw,
Sathya Perla748b5392014-05-09 13:29:13 +05304261 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004262 break;
4263 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00004264 status = be_flash_BEx(adapter, fw, &flash_cmd,
4265 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004266 break;
4267 case UFI_TYPE3:
4268 /* Do not flash this ufi on BE3-R cards */
4269 if (adapter->asic_rev < 0x10)
4270 status = be_flash_BEx(adapter, fw,
4271 &flash_cmd,
4272 num_imgs);
4273 else {
Kalesh AP56ace3a2014-07-17 16:20:20 +05304274 status = -EINVAL;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004275 dev_err(&adapter->pdev->dev,
4276 "Can't load BE3 UFI on BE3R\n");
4277 }
4278 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004279 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004280 }
4281
Sathya Perlaca34fe32012-11-06 17:48:56 +00004282 if (ufi_type == UFI_TYPE2)
4283 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004284 else if (ufi_type == -1)
Kalesh AP56ace3a2014-07-17 16:20:20 +05304285 status = -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004286
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004287 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4288 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004289 if (status) {
4290 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004291 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004292 }
4293
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004294 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004295
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004296be_fw_exit:
4297 return status;
4298}
4299
4300int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4301{
4302 const struct firmware *fw;
4303 int status;
4304
4305 if (!netif_running(adapter->netdev)) {
4306 dev_err(&adapter->pdev->dev,
4307 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304308 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004309 }
4310
4311 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4312 if (status)
4313 goto fw_exit;
4314
4315 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4316
4317 if (lancer_chip(adapter))
4318 status = lancer_fw_download(adapter, fw);
4319 else
4320 status = be_fw_download(adapter, fw);
4321
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004322 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304323 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004324
Ajit Khaparde84517482009-09-04 03:12:16 +00004325fw_exit:
4326 release_firmware(fw);
4327 return status;
4328}
4329
Sathya Perla748b5392014-05-09 13:29:13 +05304330static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004331{
4332 struct be_adapter *adapter = netdev_priv(dev);
4333 struct nlattr *attr, *br_spec;
4334 int rem;
4335 int status = 0;
4336 u16 mode = 0;
4337
4338 if (!sriov_enabled(adapter))
4339 return -EOPNOTSUPP;
4340
4341 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004342 if (!br_spec)
4343 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004344
4345 nla_for_each_nested(attr, br_spec, rem) {
4346 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4347 continue;
4348
Thomas Grafb7c1a312014-11-26 13:42:17 +01004349 if (nla_len(attr) < sizeof(mode))
4350 return -EINVAL;
4351
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004352 mode = nla_get_u16(attr);
4353 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4354 return -EINVAL;
4355
4356 status = be_cmd_set_hsw_config(adapter, 0, 0,
4357 adapter->if_handle,
4358 mode == BRIDGE_MODE_VEPA ?
4359 PORT_FWD_TYPE_VEPA :
4360 PORT_FWD_TYPE_VEB);
4361 if (status)
4362 goto err;
4363
4364 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4365 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4366
4367 return status;
4368 }
4369err:
4370 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4371 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4372
4373 return status;
4374}
4375
4376static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304377 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004378{
4379 struct be_adapter *adapter = netdev_priv(dev);
4380 int status = 0;
4381 u8 hsw_mode;
4382
4383 if (!sriov_enabled(adapter))
4384 return 0;
4385
4386 /* BE and Lancer chips support VEB mode only */
4387 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4388 hsw_mode = PORT_FWD_TYPE_VEB;
4389 } else {
4390 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4391 adapter->if_handle, &hsw_mode);
4392 if (status)
4393 return 0;
4394 }
4395
4396 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4397 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004398 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4399 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004400}
4401
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304402#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004403/* VxLAN offload Notes:
4404 *
4405 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4406 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4407 * is expected to work across all types of IP tunnels once exported. Skyhawk
4408 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4409 * offloads in hw_enc_features only when a VxLAN port is added. Note this only
4410 * ensures that other tunnels work fine while VxLAN offloads are not enabled.
4411 *
4412 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4413 * adds more than one port, disable offloads and don't re-enable them again
4414 * until after all the tunnels are removed.
4415 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304416static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4417 __be16 port)
4418{
4419 struct be_adapter *adapter = netdev_priv(netdev);
4420 struct device *dev = &adapter->pdev->dev;
4421 int status;
4422
4423 if (lancer_chip(adapter) || BEx_chip(adapter))
4424 return;
4425
4426 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304427 dev_info(dev,
4428 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004429 dev_info(dev, "Disabling VxLAN offloads\n");
4430 adapter->vxlan_port_count++;
4431 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304432 }
4433
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004434 if (adapter->vxlan_port_count++ >= 1)
4435 return;
4436
Sathya Perlac9c47142014-03-27 10:46:19 +05304437 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4438 OP_CONVERT_NORMAL_TO_TUNNEL);
4439 if (status) {
4440 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4441 goto err;
4442 }
4443
4444 status = be_cmd_set_vxlan_port(adapter, port);
4445 if (status) {
4446 dev_warn(dev, "Failed to add VxLAN port\n");
4447 goto err;
4448 }
4449 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4450 adapter->vxlan_port = port;
4451
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004452 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4453 NETIF_F_TSO | NETIF_F_TSO6 |
4454 NETIF_F_GSO_UDP_TUNNEL;
4455 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304456 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004457
Sathya Perlac9c47142014-03-27 10:46:19 +05304458 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4459 be16_to_cpu(port));
4460 return;
4461err:
4462 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304463}
4464
4465static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4466 __be16 port)
4467{
4468 struct be_adapter *adapter = netdev_priv(netdev);
4469
4470 if (lancer_chip(adapter) || BEx_chip(adapter))
4471 return;
4472
4473 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004474 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304475
4476 be_disable_vxlan_offloads(adapter);
4477
4478 dev_info(&adapter->pdev->dev,
4479 "Disabled VxLAN offloads for UDP port %d\n",
4480 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004481done:
4482 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304483}
Joe Stringer725d5482014-11-13 16:38:13 -08004484
Jesse Gross5f352272014-12-23 22:37:26 -08004485static netdev_features_t be_features_check(struct sk_buff *skb,
4486 struct net_device *dev,
4487 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004488{
Jesse Gross5f352272014-12-23 22:37:26 -08004489 return vxlan_features_check(skb, features);
Joe Stringer725d5482014-11-13 16:38:13 -08004490}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304491#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304492
stephen hemmingere5686ad2012-01-05 19:10:25 +00004493static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004494 .ndo_open = be_open,
4495 .ndo_stop = be_close,
4496 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004497 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004498 .ndo_set_mac_address = be_mac_addr_set,
4499 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004500 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004501 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004502 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4503 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004504 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004505 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004506 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004507 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304508 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004509#ifdef CONFIG_NET_POLL_CONTROLLER
4510 .ndo_poll_controller = be_netpoll,
4511#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004512 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4513 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304514#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304515 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304516#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304517#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304518 .ndo_add_vxlan_port = be_add_vxlan_port,
4519 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08004520 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304521#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004522};
4523
/* Initialize netdev feature flags, ops and ethtool ops for the adapter.
 * Called once during probe, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing is only advertised when multiple RX queues exist */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enabled-by-default set = user-togglable set plus VLAN RX
	 * features that must stay on.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO size so frame + Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4550
4551static void be_unmap_pci_bars(struct be_adapter *adapter)
4552{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004553 if (adapter->csr)
4554 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004555 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004556 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004557}
4558
Sathya Perlace66f782012-11-06 17:48:58 +00004559static int db_bar(struct be_adapter *adapter)
4560{
4561 if (lancer_chip(adapter) || !be_physfn(adapter))
4562 return 0;
4563 else
4564 return 4;
4565}
4566
4567static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004568{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004569 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004570 adapter->roce_db.size = 4096;
4571 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4572 db_bar(adapter));
4573 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4574 db_bar(adapter));
4575 }
Parav Pandit045508a2012-03-26 14:27:13 +00004576 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004577}
4578
/* Map the PCI BARs the driver needs: the CSR BAR (BE2/BE3 PF only),
 * the doorbell BAR, and the RoCE doorbell window (Skyhawk).
 *
 * Returns 0 on success or -ENOMEM on an iomap failure; on the doorbell
 * mapping failure path any already-mapped BARs are unmapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* CSR BAR (BAR 2) is exposed only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4602
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004603static void be_ctrl_cleanup(struct be_adapter *adapter)
4604{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004605 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004606
4607 be_unmap_pci_bars(adapter);
4608
4609 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004610 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4611 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004612
Sathya Perla5b8821b2011-08-02 19:57:44 +00004613 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004614 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004615 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4616 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004617}
4618
/* One-time control-path init: read the SLI interface register, map PCI
 * BARs, allocate the 16-byte-aligned MCC mailbox and the rx-filter cmd
 * DMA buffer, and init the locks used by the mailbox/MCC paths.
 * On failure, resources acquired so far are released via goto cleanup.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Derive SLI family and PF/VF identity from config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be
	 * 16-byte aligned below; keep the raw allocation for freeing.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save state now so EEH/PM paths can pci_restore_state() later */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4677
4678static void be_stats_cleanup(struct be_adapter *adapter)
4679{
Sathya Perla3abcded2010-10-03 22:12:27 -07004680 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004681
4682 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004683 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4684 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004685}
4686
4687static int be_stats_init(struct be_adapter *adapter)
4688{
Sathya Perla3abcded2010-10-03 22:12:27 -07004689 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004690
Sathya Perlaca34fe32012-11-06 17:48:56 +00004691 if (lancer_chip(adapter))
4692 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4693 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004694 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004695 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004696 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004697 else
4698 /* ALL non-BE ASICs */
4699 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004700
Joe Perchesede23fa82013-08-26 22:45:23 -07004701 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4702 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304703 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304704 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004705 return 0;
4706}
4707
/* PCI remove callback: tears the adapter down roughly in the reverse
 * order of be_probe(). Tolerates a NULL drvdata (early probe failure).
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Detach the RoCE ULP and block further ULP interrupts first */
	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before the netdev disappears */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4738
Sathya Perla39f1d942012-05-08 19:41:24 +00004739static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004740{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304741 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004742
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004743 status = be_cmd_get_cntl_attributes(adapter);
4744 if (status)
4745 return status;
4746
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004747 /* Must be a power of 2 or else MODULO will BUG_ON */
4748 adapter->be_get_temp_freq = 64;
4749
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304750 if (BEx_chip(adapter)) {
4751 level = be_cmd_get_fw_log_level(adapter);
4752 adapter->msg_enable =
4753 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4754 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004755
Sathya Perla92bf14a2013-08-27 16:57:32 +05304756 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004757 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004758}
4759
/* Attempt full recovery of a Lancer function after a HW error:
 * wait for FW readiness, quiesce and destroy the current config,
 * clear error state, then rebuild and (if it was running) reopen.
 * Returns 0 on success; -EAGAIN means FW is still provisioning
 * resources and the caller should retry later.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Must clear error flags before be_setup() issues FW cmds */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4796
/* Periodic (1s) worker that polls for HW errors and, on Lancer,
 * detaches the netdev and runs lancer_recover_func(). Reschedules
 * itself unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* rtnl protects the detach against concurrent ndo ops */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4822
/* Periodic (1s) housekeeping worker: fires FW stats requests, polls
 * die temperature on PFs, replenishes starved RX queues and updates
 * EQ delay (interrupt moderation). Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only issue a new stats cmd when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is a power of 2 (see be_get_initial_config) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4865
Sathya Perla257a3fe2013-06-14 15:54:51 +05304866/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004867static bool be_reset_required(struct be_adapter *adapter)
4868{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304869 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004870}
4871
Sathya Perlad3791422012-09-28 04:39:44 +00004872static char *mc_name(struct be_adapter *adapter)
4873{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304874 char *str = ""; /* default */
4875
4876 switch (adapter->mc_type) {
4877 case UMC:
4878 str = "UMC";
4879 break;
4880 case FLEX10:
4881 str = "FLEX10";
4882 break;
4883 case vNIC1:
4884 str = "vNIC-1";
4885 break;
4886 case nPAR:
4887 str = "nPAR";
4888 break;
4889 case UFP:
4890 str = "UFP";
4891 break;
4892 case vNIC2:
4893 str = "vNIC-2";
4894 break;
4895 default:
4896 str = "";
4897 }
4898
4899 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004900}
4901
/* "PF" for a physical function, "VF" for a virtual one. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4906
/* PCI probe callback: enables the device, allocates the netdev and
 * adapter, brings up the control path and FW, configures the adapter
 * (be_setup) and registers the netdev. Errors unwind through the
 * labelled cleanup chain at the bottom, in reverse acquisition order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to on in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5029
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce
 * interrupts and the recovery worker, close the netdev, destroy the
 * HW config, then power the PCI device down. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Recovery worker must not run while we tear things down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5054
5055static int be_resume(struct pci_dev *pdev)
5056{
5057 int status = 0;
5058 struct be_adapter *adapter = pci_get_drvdata(pdev);
5059 struct net_device *netdev = adapter->netdev;
5060
5061 netif_device_detach(netdev);
5062
5063 status = pci_enable_device(pdev);
5064 if (status)
5065 return status;
5066
Yijing Wang1ca01512013-06-27 20:53:42 +08005067 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005068 pci_restore_state(pdev);
5069
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05305070 status = be_fw_wait_ready(adapter);
5071 if (status)
5072 return status;
5073
Kalesh AP9a6d73d2015-01-20 03:51:47 -05005074 status = be_cmd_reset_function(adapter);
5075 if (status)
5076 return status;
5077
Ajit Khaparded4360d62013-11-22 12:51:09 -06005078 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005079 /* tell fw we're ready to fire cmds */
5080 status = be_cmd_fw_init(adapter);
5081 if (status)
5082 return status;
5083
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00005084 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005085 if (netif_running(netdev)) {
5086 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005087 be_open(netdev);
5088 rtnl_unlock();
5089 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005090
5091 schedule_delayed_work(&adapter->func_recovery_work,
5092 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005093 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005094
Suresh Reddy76a9e082014-01-15 13:23:40 +05305095 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005096 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005097
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005098 return 0;
5099}
5100
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stop workers, detach the netdev and reset the
 * function so the HW stops DMAing before the system goes down.
 * Tolerates a NULL drvdata (early probe failure).
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset quiesces all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5121
/* EEH/AER error_detected callback: on first notification, quiesce the
 * adapter (stop recovery worker, close netdev, destroy config), then
 * disable the device. Returns DISCONNECT for permanent failures,
 * otherwise NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Guard against repeated notifications for the same error */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5160
/* EEH slot_reset callback: re-enable the device after the slot reset,
 * restore PCI state, and wait for FW readiness before declaring the
 * device recoverable. Returns RECOVERED or DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any latched uncorrectable AER status before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5187
/* EEH resume callback: rebuild the adapter after a successful slot
 * reset (function reset, FW init, be_setup, reopen the netdev) and
 * restart the recovery worker. Failures are only logged — there is no
 * error return from this callback.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Re-save state so a subsequent EEH event can restore it */
	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5230
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5236
/* PCI driver descriptor tying device IDs to the callbacks above */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5247
5248static int __init be_init_module(void)
5249{
Joe Perches8e95a202009-12-03 07:58:21 +00005250 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5251 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005252 printk(KERN_WARNING DRV_NAME
5253 " : Module param rx_frag_size must be 2048/4096/8192."
5254 " Using 2048\n");
5255 rx_frag_size = 2048;
5256 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005257
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005258 return pci_register_driver(&be_driver);
5259}
5260module_init(be_init_module);
5261
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);