blob: 37a26b0b7e33162ecdff92435d3aa7b2d06b600c [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the Unrecoverable Error (UE) status low
 * register — presumably one entry per bit position, lowest bit first;
 * confirm against the code that decodes the UE status registers.
 * Trailing spaces in several strings are intentional and preserved.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR */
/* Human-readable names for the Unrecoverable Error (UE) status high
 * register — presumably one entry per bit position, lowest bit first;
 * the final "Unknown" entry covers bits beyond the named set.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
/* Change the interface MAC address (@p is a struct sockaddr holding the
 * new address). Adds the new MAC via PMAC_ADD, deletes the previously
 * programmed one, then queries the FW to confirm the new MAC actually
 * became active before committing it to netdev->dev_addr.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
/* Copy the v0-layout HW stats (BE2 chips) from the DMA response buffer
 * into adapter->drv_stats, converting from little-endian first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 layout tracks address and VLAN filtering separately; the
	 * driver reports their sum
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are per physical port in the v0 layout */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
393
/* Copy the v1-layout HW stats (BE3 chips) from the DMA response buffer
 * into adapter->drv_stats, converting from little-endian first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
439
/* Copy the v2-layout HW stats (chips newer than BE3) from the DMA
 * response buffer into adapter->drv_stats, converting from
 * little-endian first. Also picks up RoCE counters when supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
493
/* Copy the Lancer pport stats from the DMA response buffer into
 * adapter->drv_stats, converting from little-endian first.
 * The "_lo" fields are the low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer tracks address and VLAN filtering separately; the driver
	 * reports their sum
	 */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000556void be_parse_stats(struct be_adapter *adapter)
557{
Ajit Khaparde61000862013-10-03 16:16:33 -0500558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559 struct be_rx_obj *rxo;
560 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000561 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000562
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000565 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else if (BE3_chip(adapter))
569 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 else
572 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573
Ajit Khaparde61000862013-10-03 16:16:33 -0500574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000578 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000579 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000580}
581
/* net_device_ops::ndo_get_stats64 handler.
 * Aggregates the per-RX-queue and per-TX-queue software counters plus the
 * driver-maintained HW error counters (adapter->drv_stats) into @stats.
 * Per-queue 64-bit counters are read inside u64_stats fetch/retry loops so
 * the values are torn-read-safe on 32-bit hosts.
 * Returns @stats (the convention for this ndo).
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666{
Sathya Perla3c8def92011-06-12 20:01:58 +0000667 struct be_tx_stats *stats = tx_stats(txo);
668
Sathya Perlaab1594e2011-07-25 19:10:15 +0000669 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000670 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671 stats->tx_bytes += skb->len;
672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500676/* Returns number of WRBs needed for the skb */
677static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500679 /* +1 for the header wrb */
680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681}
682
683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
684{
685 wrb->frag_pa_hi = upper_32_bits(addr);
686 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
687 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000688 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689}
690
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000691static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530692 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000693{
694 u8 vlan_prio;
695 u16 vlan_tag;
696
697 vlan_tag = vlan_tx_tag_get(skb);
698 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
699 /* If vlan priority provided by OS is NOT in available bmap */
700 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
701 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
702 adapter->recommended_prio;
703
704 return vlan_tag;
705}
706
Sathya Perlac9c47142014-03-27 10:46:19 +0530707/* Used only for IP tunnel packets */
708static u16 skb_inner_ip_proto(struct sk_buff *skb)
709{
710 return (inner_ip_hdr(skb)->version == 4) ?
711 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
712}
713
714static u16 skb_ip_proto(struct sk_buff *skb)
715{
716 return (ip_hdr(skb)->version == 4) ?
717 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
718}
719
/* Populate the TX header WRB that precedes a request's fragment WRBs.
 * @wrb_cnt: total number of WRBs (header + fragments) used by the request
 * @len:     total byte length of the packet
 * @skip_hw_vlan: request the f/w workaround that skips HW VLAN insertion
 * The bits set here tell the HW whether to perform LSO, TCP/UDP/IP
 * checksum offload and VLAN tagging for this packet.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is deliberately not set on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* For tunnelled pkts the inner L4 proto decides the csum bit */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
763
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000764static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530765 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000766{
767 dma_addr_t dma;
768
769 be_dws_le_to_cpu(wrb, sizeof(*wrb));
770
771 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000772 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000773 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774 dma_unmap_single(dev, dma, wrb->frag_len,
775 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000776 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000777 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000778 }
779}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780
/* Returns the number of WRBs used up by the skb.
 * Maps the skb's linear part and page fragments for DMA and posts one
 * header WRB plus one fragment WRB per mapped piece onto txo's queue.
 * On a DMA-mapping failure returns 0 after restoring the queue head and
 * unmapping every piece mapped so far.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;	/* remembered for rollback on error */

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	/* Map the linear (header) portion of the skb, if it is non-empty */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Remember the skb at the header-WRB slot for completion handling */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* walk the already-filled WRBs and undo their DMA mappings */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB is a single map */
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}
856
/* Insert VLAN tag(s) into the packet data itself instead of relying on HW
 * tagging; used by workarounds where HW VLAN insertion triggers chip bugs.
 * May set *skip_hw_vlan to tell the f/w to skip HW tagging.
 * Returns the (possibly reallocated) skb, or NULL if a clone/insert failed
 * (the skb is consumed by the failing helper in that case).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* The packet data is about to be modified; unshare it first */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* Untagged pkts get the port-VLAN id when QnQ is active */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the frame; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
901
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000902static bool be_ipv6_exthdr_check(struct sk_buff *skb)
903{
904 struct ethhdr *eh = (struct ethhdr *)skb->data;
905 u16 offset = ETH_HLEN;
906
907 if (eh->h_proto == htons(ETH_P_IPV6)) {
908 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
909
910 offset += sizeof(struct ipv6hdr);
911 if (ip6h->nexthdr != NEXTHDR_TCP &&
912 ip6h->nexthdr != NEXTHDR_UDP) {
913 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530914 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000915
916 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
917 if (ehdr->hdrlen == 0xff)
918 return true;
919 }
920 }
921 return false;
922}
923
924static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
925{
926 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
927}
928
Sathya Perla748b5392014-05-09 13:29:13 +0530929static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000930{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000931 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000932}
933
/* Workarounds for BEx/Lancer chip bugs around padded, VLAN-tagged and
 * certain ipv6 packets. May trim, modify (insert VLAN in the frame data),
 * or drop the skb, and may set *skip_hw_vlan.
 * Returns the (possibly modified/reallocated) skb, or NULL when it was
 * consumed (dropped or a workaround helper failed).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * So trim the pkt back to the length the IP header declares.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1001
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301002static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1003 struct sk_buff *skb,
1004 bool *skip_hw_vlan)
1005{
1006 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1007 * less may cause a transmit stall on that port. So the work-around is
1008 * to pad short packets (<= 32 bytes) to a 36-byte length.
1009 */
1010 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001011 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301012 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* Ring the TX doorbell for all WRBs queued on @txo since the last flush.
 * The last request is made eventable first, and on non-Lancer chips a
 * dummy WRB is appended when the pending count is odd (presumably the HW
 * wants an even set — see the inline comment below).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* account the dummy wrb in the last request's wrb count */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1047
/* net_device_ops::ndo_start_xmit handler.
 * Applies chip-specific workarounds, enqueues the skb's WRBs on the TX
 * queue selected by the skb's queue mapping, stops the subqueue when a
 * max-fragmented request may no longer fit, and rings the doorbell unless
 * more packets are expected (skb->xmit_more).
 * Always returns NETDEV_TX_OK; dropped skbs are freed here or by the
 * workaround helpers.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;	/* skb already consumed by the workarounds */

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		/* enqueue failed (DMA mapping error); drop the pkt */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* Stop the queue if the next max-fragmented request cannot fit */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1084
1085static int be_change_mtu(struct net_device *netdev, int new_mtu)
1086{
1087 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301088 struct device *dev = &adapter->pdev->dev;
1089
1090 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1091 dev_info(dev, "MTU must be between %d and %d bytes\n",
1092 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093 return -EINVAL;
1094 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301095
1096 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301097 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098 netdev->mtu = new_mtu;
1099 return 0;
1100}
1101
1102/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001103 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1104 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001105 */
/* Program the HW VLAN filter with every VID currently set in
 * adapter->vids. Falls back to VLAN promiscuous mode when more VIDs are
 * configured than the HW supports or when programming fails for lack of
 * resources, and leaves promisc mode again once filtering succeeds.
 * Returns 0 on success or the failing command's status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Nothing to do if already in VLAN promisc mode */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1158
Patrick McHardy80d5c362013-04-19 02:04:28 +00001159static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001160{
1161 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001162 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001163
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001164 /* Packets with VID 0 are always received by Lancer by default */
1165 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301166 return status;
1167
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301168 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301169 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001170
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301171 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301172 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001173
Somnath Kotura6b74e02014-01-21 15:50:55 +05301174 status = be_vid_config(adapter);
1175 if (status) {
1176 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301177 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301178 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301179
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001180 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001181}
1182
Patrick McHardy80d5c362013-04-19 02:04:28 +00001183static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184{
1185 struct be_adapter *adapter = netdev_priv(netdev);
1186
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001187 /* Packets with VID 0 are always received by Lancer by default */
1188 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301189 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001190
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301191 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301192 adapter->vlans_added--;
1193
1194 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001195}
1196
Somnath kotur7ad09452014-03-03 14:24:43 +05301197static void be_clear_promisc(struct be_adapter *adapter)
1198{
1199 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301200 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301201
1202 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1203}
1204
/* net_device_ops::ndo_set_rx_mode handler.
 * Syncs the netdev's promiscuous/unicast/multicast filtering state with
 * the HW: IFF_PROMISC enables full promiscuous mode; too many configured
 * UC addresses also fall back to promiscuous; too many MC addresses (or
 * IFF_ALLMULTI) fall back to multicast-promiscuous; otherwise UC MACs are
 * programmed into pmac slots and the MC filter is enabled.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* The UC list changed; reprogram the secondary pmac slots */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC MACs than pmac slots: fall back to promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC filtering works again; leave mcast-promisc mode */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1271
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Returns 0 on success, -EPERM if SR-IOV is off, -EINVAL on bad args,
 * or a translated FW error from be_cmd_status().
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BE2/BE3: no single set-mac cmd; delete old pmac entry and
		 * add the new one (vf + 1 is the FW's VF domain id)
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		/* Skyhawk/Lancer support a direct set-mac command */
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the active MAC only after FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1311
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001312static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301313 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001314{
1315 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001316 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001317
Sathya Perla11ac75e2011-12-13 00:58:50 +00001318 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319 return -EPERM;
1320
Sathya Perla11ac75e2011-12-13 00:58:50 +00001321 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001322 return -EINVAL;
1323
1324 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001325 vi->max_tx_rate = vf_cfg->tx_rate;
1326 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001327 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1328 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001329 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301330 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001331
1332 return 0;
1333}
1334
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF @vf.
 * A non-zero @vlan/@qos programs the tag via hidden-switch config; zero for
 * both resets transparent tagging. Returns 0 on success, -EPERM/-EINVAL on
 * precondition failures, or a translated FW error.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	/* vid must fit in 12 bits, priority in 3 bits */
	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Pack prio into the upper bits alongside the vid */
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* Skip the FW cmd if the requested tag is already active */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* Cache the tag only after FW accepted it */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1369
/* ndo_set_vf_rate handler: program a max TX rate (Mbps) for VF @vf.
 * min_tx_rate is not supported and must be 0. max_tx_rate == 0 removes the
 * limit. A non-zero rate is validated against the current link speed (and,
 * on Skyhawk, must be an exact percentage of it). Returns 0 on success or a
 * negative errno / translated FW error.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* min-rate limiting is not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 == no limit: skip link checks and program directly */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	/* vf + 1 is the FW's VF domain id */
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301431
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301432static int be_set_vf_link_state(struct net_device *netdev, int vf,
1433 int link_state)
1434{
1435 struct be_adapter *adapter = netdev_priv(netdev);
1436 int status;
1437
1438 if (!sriov_enabled(adapter))
1439 return -EPERM;
1440
1441 if (vf >= adapter->num_vfs)
1442 return -EINVAL;
1443
1444 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301445 if (status) {
1446 dev_err(&adapter->pdev->dev,
1447 "Link state change on VF %d failed: %#x\n", vf, status);
1448 return be_cmd_status(status);
1449 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301450
Kalesh APabccf232014-07-17 16:20:24 +05301451 adapter->vf_cfg[vf].plink_tracking = link_state;
1452
1453 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301454}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001455
Sathya Perla2632baf2013-10-01 16:00:00 +05301456static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1457 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458{
Sathya Perla2632baf2013-10-01 16:00:00 +05301459 aic->rx_pkts_prev = rx_pkts;
1460 aic->tx_reqs_prev = tx_pkts;
1461 aic->jiffies = now;
1462}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001463
/* Periodically recompute the event-queue interrupt delay (EQD) for each EQ
 * from the observed rx+tx packet rate (adaptive interrupt coalescing), and
 * push all changed delays to FW in a single be_cmd_modify_eqd() call.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: force the user-set (ethtool) delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the 64-bit rx counter consistently (retry loop
		 * needed on 32-bit hosts)
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		/* Same for the tx-request counter */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second since last sample */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Clamp the computed delay into the configured range;
		 * very low rates get no delay at all
		 */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only if the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1529
/* Account one RX completion @rxcp into the per-queue stats of @rxo.
 * The u64_stats begin/end pair makes the 64-bit counter updates readable
 * without tearing on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1545
Sathya Perla2e588f82011-03-11 02:49:26 +00001546static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001547{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001548 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301549 * Also ignore ipcksm for ipv6 pkts
1550 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001551 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301552 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001553}
1554
/* Pop the page-info entry at the tail of the RX queue and make its data
 * CPU-visible: the last fragment of a page triggers a full dma_unmap of the
 * (big) page, earlier fragments only need a per-fragment CPU sync.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted descriptor must always carry a page */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment carved from this page: unmap the whole page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still partially owned by HW: sync just this frag */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the descriptor */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1580
1581/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001582static void be_rx_compl_discard(struct be_rx_obj *rxo,
1583 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001586 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001588 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301589 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001590 put_page(page_info->page);
1591 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592 }
1593}
1594
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment is (partially) copied into the skb
 * linear area, remaining fragments are attached as page frags, coalescing
 * frags that come from the same physical page into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header inline; the rest of the
		 * first fragment stays in the page and becomes frag[0]
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page ownership moved to the skb (or released); clear our ref */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref,
			 * the existing frag slot is just extended below
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1669
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted page fragments, set checksum /
 * hash / vlan metadata and hand it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No memory for an skb: count the drop and release frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev feature is on and
	 * the completion flags say it verified OK
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1705
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted page fragments to a napi frag-skb (coalescing frags
 * from the same physical page), fill in metadata and feed the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: release the posted frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* j starts at -1 so the first iteration always opens frag slot 0 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: drop the extra ref, extend the slot */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for checksum-verified packets */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1763
/* Decode a v1 (BE3-native) RX completion descriptor @compl into the
 * driver's chip-independent @rxcp representation.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	/* v1 completions report tunneled (e.g. vxlan) status */
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001786
/* Decode a v0 (legacy) RX completion descriptor @compl into the driver's
 * chip-independent @rxcp representation. Unlike v1, v0 reports an ip_frag
 * bit instead of tunneled status.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1808
/* Fetch and parse the next RX completion from @rxo's completion queue.
 * Returns the parsed rxcp, or NULL if no valid completion is pending.
 * Also post-processes vlan info (QnQ, byte order, pvid filtering) and
 * invalidates + consumes the CQ entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: do not read the rest of the completion before the
	 * valid bit has been observed set
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag if it matches the port vlan (pvid) and the
		 * host did not explicitly configure that vid
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1853
Eric Dumazet1829b082011-03-01 05:48:12 +00001854static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001857
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001859 gfp |= __GFP_COMP;
1860 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861}
1862
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Post until frags_needed is met or the next ring slot still holds
	 * an un-reaped page (ring full)
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page; map it once for DMA and
			 * carve frags out of it below
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current big page: take an extra
			 * page ref for it
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			/* No: mark this frag as the page's last one; the
			 * whole-page DMA address is stored for unmapping
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 frags per doorbell */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1945
/* Fetch the next valid TX completion from tx_cq, or NULL if none is
 * pending. The entry is byte-swapped to CPU order, invalidated and the
 * CQ tail advanced so it is never processed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate the entry so it is not re-processed */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1961
/* Walk the TXQ from its tail up to and including last_index, unmapping
 * each wrb and freeing the skbs recorded in txo->sent_skb_list along the
 * way. Returns the number of wrbs processed; the caller is responsible
 * for subtracting them from txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL entry in sent_skb_list marks the hdr wrb of a
		 * new request
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag wrb after the hdr also unmaps the skb's
		 * linear header, if any
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last processed request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
1995
/* Return the number of events in the event queue; each consumed EQE is
 * zeroed so it is not counted again and the EQ tail is advanced past it.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read/clear the entry only after evt is seen non-zero */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2015
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002016/* Leaves the EQ is disarmed state */
2017static void be_eq_clean(struct be_eq_obj *eqo)
2018{
2019 int num = events_get(eqo);
2020
2021 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2022}
2023
/* Drain the RX CQ, discarding every completion, then free all RX buffers
 * still posted on the RXQ and reset the RXQ indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2073
/* Drain TX completions on all TXQs until the HW has been silent, then
 * reclaim any wrbs that were enqueued but never notified to the HW and
 * rewind the TXQ indices accordingly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW still active; restart the silence timer */
				timeo = 0;
			}
			/* A txq is fully drained once only never-notified
			 * wrbs remain on it
			 */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2138
/* Tear down all event queues: drain each created EQ, destroy it via FW
 * command, unregister its NAPI context, and free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Queue memory is freed even if the EQ was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2154
/* Create the event queues (one per vector, capped by cfg_num_qs) and
 * register a NAPI context for each. Returns 0 on success or the first
 * failing status; partially created queues are left for the destroy path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* One EQ per available interrupt, bounded by the configured count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Enable adaptive interrupt coalescing by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2187
Sathya Perla5fb379e2009-06-18 00:02:59 +00002188static void be_mcc_queues_destroy(struct be_adapter *adapter)
2189{
2190 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002191
Sathya Perla8788fdc2009-07-27 22:52:03 +00002192 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002193 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002194 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002195 be_queue_free(adapter, q);
2196
Sathya Perla8788fdc2009-07-27 22:52:03 +00002197 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002198 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002199 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002200 be_queue_free(adapter, q);
2201}
2202
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC CQ and MCC queue; on any failure the goto chain unwinds
 * whatever was created so far. Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2235
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002236static void be_tx_queues_destroy(struct be_adapter *adapter)
2237{
2238 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002239 struct be_tx_obj *txo;
2240 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241
Sathya Perla3c8def92011-06-12 20:01:58 +00002242 for_all_tx_queues(adapter, txo, i) {
2243 q = &txo->q;
2244 if (q->created)
2245 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2246 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247
Sathya Perla3c8def92011-06-12 20:01:58 +00002248 q = &txo->cq;
2249 if (q->created)
2250 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2251 be_queue_free(adapter, q);
2252 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002253}
2254
/* Create all TX queues and their completion queues. Returns 0 on success
 * or the first failing status; partially created queues are left for the
 * destroy path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	/* No more TX queues than event queues or the HW maximum */
	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2295
2296static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002297{
2298 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002299 struct be_rx_obj *rxo;
2300 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002301
Sathya Perla3abcded2010-10-03 22:12:27 -07002302 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002303 q = &rxo->cq;
2304 if (q->created)
2305 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2306 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308}
2309
/* Create the RX completion queues (one per EQ, plus a default RXQ when
 * RSS is in use). Returns 0 on success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* Size of the big page that be_post_rx_frags carves into frags */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs may share EQs when there are fewer EQs than RXQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2346
/* INTx interrupt handler: counts pending events, schedules NAPI and acks
 * the events without re-arming the EQ. Tracks spurious interrupts so that
 * only the first one after a valid interrupt is reported as handled.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events; the EQ is left unarmed */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2378
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002379static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002381 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002382
Sathya Perla0b545a62012-11-23 00:27:18 +00002383 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2384 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002385 return IRQ_HANDLED;
2386}
2387
Sathya Perla2e588f82011-03-11 02:49:26 +00002388static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002389{
Somnath Koture38b1702013-05-29 22:55:56 +00002390 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002391}
2392
/* NAPI RX poll loop: process up to @budget completions on rxo, deliver
 * packets (via GRO when eligible and not busy-polling), notify the CQ
 * and replenish the RXQ when it runs low. Returns the number of
 * completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Track frags consumed so the refill below can match them */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2452
Kalesh AP512bb8a2014-09-02 09:56:49 +05302453static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2454{
2455 switch (status) {
2456 case BE_TX_COMP_HDR_PARSE_ERR:
2457 tx_stats(txo)->tx_hdr_parse_err++;
2458 break;
2459 case BE_TX_COMP_NDMA_ERR:
2460 tx_stats(txo)->tx_dma_err++;
2461 break;
2462 case BE_TX_COMP_ACL_ERR:
2463 tx_stats(txo)->tx_spoof_check_err++;
2464 break;
2465 }
2466}
2467
2468static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2469{
2470 switch (status) {
2471 case LANCER_TX_COMP_LSO_ERR:
2472 tx_stats(txo)->tx_tso_err++;
2473 break;
2474 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2475 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2476 tx_stats(txo)->tx_spoof_check_err++;
2477 break;
2478 case LANCER_TX_COMP_QINQ_ERR:
2479 tx_stats(txo)->tx_qinq_err++;
2480 break;
2481 case LANCER_TX_COMP_PARITY_ERR:
2482 tx_stats(txo)->tx_internal_parity_err++;
2483 break;
2484 case LANCER_TX_COMP_DMA_ERR:
2485 tx_stats(txo)->tx_dma_err++;
2486 break;
2487 }
2488}
2489
/* Reap all pending TX completions on txo: free/unmap the completed wrbs,
 * record per-status error stats, notify the CQ, and wake the netdev
 * subqueue @idx if it was stopped and enough wrbs are now free.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* Non-zero status indicates a TX error; account it per chip */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002528
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302529int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002530{
2531 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2532 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002533 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302534 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302535 struct be_tx_obj *txo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002536
Sathya Perla0b545a62012-11-23 00:27:18 +00002537 num_evts = events_get(eqo);
2538
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302539 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2540 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002541
Sathya Perla6384a4d2013-10-25 10:40:16 +05302542 if (be_lock_napi(eqo)) {
2543 /* This loop will iterate twice for EQ0 in which
2544 * completions of the last RXQ (default one) are also processed
2545 * For other EQs the loop iterates only once
2546 */
2547 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2548 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2549 max_work = max(work, max_work);
2550 }
2551 be_unlock_napi(eqo);
2552 } else {
2553 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002554 }
2555
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002556 if (is_mcc_eqo(eqo))
2557 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002558
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002559 if (max_work < budget) {
2560 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002561 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002562 } else {
2563 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002564 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002565 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002566 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002567}
2568
Sathya Perla6384a4d2013-10-25 10:40:16 +05302569#ifdef CONFIG_NET_RX_BUSY_POLL
2570static int be_busy_poll(struct napi_struct *napi)
2571{
2572 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2573 struct be_adapter *adapter = eqo->adapter;
2574 struct be_rx_obj *rxo;
2575 int i, work = 0;
2576
2577 if (!be_lock_busy_poll(eqo))
2578 return LL_FLUSH_BUSY;
2579
2580 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2581 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2582 if (work)
2583 break;
2584 }
2585
2586 be_unlock_busy_poll(eqo);
2587 return work;
2588}
2589#endif
2590
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002591void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002592{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002593 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2594 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002595 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302596 bool error_detected = false;
2597 struct device *dev = &adapter->pdev->dev;
2598 struct net_device *netdev = adapter->netdev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002599
Sathya Perlad23e9462012-12-17 19:38:51 +00002600 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002601 return;
2602
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002603 if (lancer_chip(adapter)) {
2604 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2605 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2606 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302607 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002608 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302609 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302610 adapter->hw_error = true;
2611 /* Do not log error messages if its a FW reset */
2612 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2613 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2614 dev_info(dev, "Firmware update in progress\n");
2615 } else {
2616 error_detected = true;
2617 dev_err(dev, "Error detected in the card\n");
2618 dev_err(dev, "ERR: sliport status 0x%x\n",
2619 sliport_status);
2620 dev_err(dev, "ERR: sliport error1 0x%x\n",
2621 sliport_err1);
2622 dev_err(dev, "ERR: sliport error2 0x%x\n",
2623 sliport_err2);
2624 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002625 }
2626 } else {
2627 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302628 PCICFG_UE_STATUS_LOW, &ue_lo);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002629 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302630 PCICFG_UE_STATUS_HIGH, &ue_hi);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002631 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302632 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002633 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302634 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002635
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002636 ue_lo = (ue_lo & ~ue_lo_mask);
2637 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002638
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302639 /* On certain platforms BE hardware can indicate spurious UEs.
2640 * Allow HW to stop working completely in case of a real UE.
2641 * Hence not setting the hw_error for UE detection.
2642 */
2643
2644 if (ue_lo || ue_hi) {
2645 error_detected = true;
2646 dev_err(dev,
2647 "Unrecoverable Error detected in the adapter");
2648 dev_err(dev, "Please reboot server to recover");
2649 if (skyhawk_chip(adapter))
2650 adapter->hw_error = true;
2651 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2652 if (ue_lo & 1)
2653 dev_err(dev, "UE: %s bit set\n",
2654 ue_status_low_desc[i]);
2655 }
2656 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2657 if (ue_hi & 1)
2658 dev_err(dev, "UE: %s bit set\n",
2659 ue_status_hi_desc[i]);
2660 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05302661 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002662 }
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302663 if (error_detected)
2664 netif_carrier_off(netdev);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002665}
2666
Sathya Perla8d56ff12009-11-22 22:02:26 +00002667static void be_msix_disable(struct be_adapter *adapter)
2668{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002669 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002670 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002671 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302672 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002673 }
2674}
2675
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002676static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002677{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002678 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002679 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002680
Sathya Perla92bf14a2013-08-27 16:57:32 +05302681 /* If RoCE is supported, program the max number of NIC vectors that
2682 * may be configured via set-channels, along with vectors needed for
2683 * RoCe. Else, just program the number we'll use initially.
2684 */
2685 if (be_roce_supported(adapter))
2686 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2687 2 * num_online_cpus());
2688 else
2689 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002690
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002691 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002692 adapter->msix_entries[i].entry = i;
2693
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002694 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2695 MIN_MSIX_VECTORS, num_vec);
2696 if (num_vec < 0)
2697 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00002698
Sathya Perla92bf14a2013-08-27 16:57:32 +05302699 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2700 adapter->num_msix_roce_vec = num_vec / 2;
2701 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2702 adapter->num_msix_roce_vec);
2703 }
2704
2705 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2706
2707 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2708 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002709 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002710
2711fail:
2712 dev_warn(dev, "MSIx enable failed\n");
2713
2714 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2715 if (!be_physfn(adapter))
2716 return num_vec;
2717 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002718}
2719
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002720static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302721 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002722{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302723 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002724}
2725
2726static int be_msix_register(struct be_adapter *adapter)
2727{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002728 struct net_device *netdev = adapter->netdev;
2729 struct be_eq_obj *eqo;
2730 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002731
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002732 for_all_evt_queues(adapter, eqo, i) {
2733 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2734 vec = be_msix_vec_get(adapter, eqo);
2735 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002736 if (status)
2737 goto err_msix;
2738 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002739
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002740 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002741err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002742 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2743 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2744 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05302745 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002746 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002747 return status;
2748}
2749
2750static int be_irq_register(struct be_adapter *adapter)
2751{
2752 struct net_device *netdev = adapter->netdev;
2753 int status;
2754
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002755 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002756 status = be_msix_register(adapter);
2757 if (status == 0)
2758 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002759 /* INTx is not supported for VF */
2760 if (!be_physfn(adapter))
2761 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002762 }
2763
Sathya Perlae49cc342012-11-27 19:50:02 +00002764 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002765 netdev->irq = adapter->pdev->irq;
2766 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002767 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002768 if (status) {
2769 dev_err(&adapter->pdev->dev,
2770 "INTx request IRQ failed - err %d\n", status);
2771 return status;
2772 }
2773done:
2774 adapter->isr_registered = true;
2775 return 0;
2776}
2777
2778static void be_irq_unregister(struct be_adapter *adapter)
2779{
2780 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002782 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002783
2784 if (!adapter->isr_registered)
2785 return;
2786
2787 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002788 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002789 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002790 goto done;
2791 }
2792
2793 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002794 for_all_evt_queues(adapter, eqo, i)
2795 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002796
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002797done:
2798 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002799}
2800
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002801static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002802{
2803 struct be_queue_info *q;
2804 struct be_rx_obj *rxo;
2805 int i;
2806
2807 for_all_rx_queues(adapter, rxo, i) {
2808 q = &rxo->q;
2809 if (q->created) {
2810 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002811 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002812 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002813 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002814 }
2815}
2816
Sathya Perla889cd4b2010-05-30 23:33:45 +00002817static int be_close(struct net_device *netdev)
2818{
2819 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002820 struct be_eq_obj *eqo;
2821 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002822
Kalesh APe1ad8e32014-04-14 16:12:41 +05302823 /* This protection is needed as be_close() may be called even when the
2824 * adapter is in cleared state (after eeh perm failure)
2825 */
2826 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2827 return 0;
2828
Parav Pandit045508a2012-03-26 14:27:13 +00002829 be_roce_dev_close(adapter);
2830
Ivan Veceradff345c52013-11-27 08:59:32 +01002831 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2832 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00002833 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302834 be_disable_busy_poll(eqo);
2835 }
David S. Miller71237b62013-11-28 18:53:36 -05002836 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00002837 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002838
2839 be_async_mcc_disable(adapter);
2840
2841 /* Wait for all pending tx completions to arrive so that
2842 * all tx skbs are freed.
2843 */
Sathya Perlafba87552013-05-08 02:05:50 +00002844 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302845 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002846
2847 be_rx_qs_destroy(adapter);
2848
Ajit Khaparded11a3472013-11-18 10:44:37 -06002849 for (i = 1; i < (adapter->uc_macs + 1); i++)
2850 be_cmd_pmac_del(adapter, adapter->if_handle,
2851 adapter->pmac_id[i], 0);
2852 adapter->uc_macs = 0;
2853
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002854 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002855 if (msix_enabled(adapter))
2856 synchronize_irq(be_msix_vec_get(adapter, eqo));
2857 else
2858 synchronize_irq(netdev->irq);
2859 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002860 }
2861
Sathya Perla889cd4b2010-05-30 23:33:45 +00002862 be_irq_unregister(adapter);
2863
Sathya Perla482c9e72011-06-29 23:33:17 +00002864 return 0;
2865}
2866
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002867static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002868{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002869 struct rss_info *rss = &adapter->rss_info;
2870 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00002871 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002872 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00002873
2874 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002875 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2876 sizeof(struct be_eth_rx_d));
2877 if (rc)
2878 return rc;
2879 }
2880
2881 /* The FW would like the default RXQ to be created first */
2882 rxo = default_rxo(adapter);
2883 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2884 adapter->if_handle, false, &rxo->rss_id);
2885 if (rc)
2886 return rc;
2887
2888 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002889 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002890 rx_frag_size, adapter->if_handle,
2891 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002892 if (rc)
2893 return rc;
2894 }
2895
2896 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302897 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2898 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002899 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302900 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002901 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302902 rss->rsstable[j + i] = rxo->rss_id;
2903 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002904 }
2905 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05302906 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2907 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00002908
2909 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05302910 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2911 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302912 } else {
2913 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05302914 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302915 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002916
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002917 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05302918 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002919 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302920 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302921 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302922 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002923 }
2924
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002925 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05302926
Sathya Perla482c9e72011-06-29 23:33:17 +00002927 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002928 for_all_rx_queues(adapter, rxo, i)
Ajit Khapardec30d7262014-09-12 17:39:16 +05302929 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002930 return 0;
2931}
2932
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002933static int be_open(struct net_device *netdev)
2934{
2935 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002936 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002937 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002938 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002939 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002940 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002941
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002942 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002943 if (status)
2944 goto err;
2945
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002946 status = be_irq_register(adapter);
2947 if (status)
2948 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002949
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002950 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002951 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002952
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002953 for_all_tx_queues(adapter, txo, i)
2954 be_cq_notify(adapter, txo->cq.id, true, 0);
2955
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002956 be_async_mcc_enable(adapter);
2957
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002958 for_all_evt_queues(adapter, eqo, i) {
2959 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302960 be_enable_busy_poll(eqo);
Suresh Reddy4cad9f32014-07-11 14:03:01 +05302961 be_eq_notify(adapter, eqo->q.id, true, true, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002962 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002963 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002964
Sathya Perla323ff712012-09-28 04:39:43 +00002965 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002966 if (!status)
2967 be_link_status_update(adapter, link_status);
2968
Sathya Perlafba87552013-05-08 02:05:50 +00002969 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002970 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05302971
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302972#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05302973 if (skyhawk_chip(adapter))
2974 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302975#endif
2976
Sathya Perla889cd4b2010-05-30 23:33:45 +00002977 return 0;
2978err:
2979 be_close(adapter->netdev);
2980 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002981}
2982
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002983static int be_setup_wol(struct be_adapter *adapter, bool enable)
2984{
2985 struct be_dma_mem cmd;
2986 int status = 0;
2987 u8 mac[ETH_ALEN];
2988
2989 memset(mac, 0, ETH_ALEN);
2990
2991 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002992 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2993 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302994 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302995 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002996
2997 if (enable) {
2998 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302999 PCICFG_PM_CONTROL_OFFSET,
3000 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003001 if (status) {
3002 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003003 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003004 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3005 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003006 return status;
3007 }
3008 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303009 adapter->netdev->dev_addr,
3010 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003011 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3012 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3013 } else {
3014 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3015 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3016 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3017 }
3018
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003019 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003020 return status;
3021}
3022
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003023/*
3024 * Generate a seed MAC address from the PF MAC Address using jhash.
3025 * MAC Address for VFs are assigned incrementally starting from the seed.
3026 * These addresses are programmed in the ASIC by the PF and the VF driver
3027 * queries for the MAC address during its probe.
3028 */
Sathya Perla4c876612013-02-03 20:30:11 +00003029static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003030{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003031 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003032 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003033 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003034 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003035
3036 be_vf_eth_addr_generate(adapter, mac);
3037
Sathya Perla11ac75e2011-12-13 00:58:50 +00003038 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303039 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003040 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003041 vf_cfg->if_handle,
3042 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303043 else
3044 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3045 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003046
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003047 if (status)
3048 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303049 "Mac address assignment failed for VF %d\n",
3050 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003051 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003052 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003053
3054 mac[5] += 1;
3055 }
3056 return status;
3057}
3058
Sathya Perla4c876612013-02-03 20:30:11 +00003059static int be_vfs_mac_query(struct be_adapter *adapter)
3060{
3061 int status, vf;
3062 u8 mac[ETH_ALEN];
3063 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003064
3065 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303066 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3067 mac, vf_cfg->if_handle,
3068 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003069 if (status)
3070 return status;
3071 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3072 }
3073 return 0;
3074}
3075
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003076static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003077{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003078 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003079 u32 vf;
3080
Sathya Perla257a3fe2013-06-14 15:54:51 +05303081 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003082 dev_warn(&adapter->pdev->dev,
3083 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003084 goto done;
3085 }
3086
Sathya Perlab4c1df92013-05-08 02:05:47 +00003087 pci_disable_sriov(adapter->pdev);
3088
Sathya Perla11ac75e2011-12-13 00:58:50 +00003089 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303090 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003091 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3092 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303093 else
3094 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3095 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003096
Sathya Perla11ac75e2011-12-13 00:58:50 +00003097 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3098 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003099done:
3100 kfree(adapter->vf_cfg);
3101 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303102 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003103}
3104
Sathya Perla77071332013-08-27 16:57:34 +05303105static void be_clear_queues(struct be_adapter *adapter)
3106{
3107 be_mcc_queues_destroy(adapter);
3108 be_rx_cqs_destroy(adapter);
3109 be_tx_queues_destroy(adapter);
3110 be_evt_queues_destroy(adapter);
3111}
3112
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303113static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003114{
Sathya Perla191eb752012-02-23 18:50:13 +00003115 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3116 cancel_delayed_work_sync(&adapter->work);
3117 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3118 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303119}
3120
Somnath Koturb05004a2013-12-05 12:08:16 +05303121static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303122{
3123 int i;
3124
Somnath Koturb05004a2013-12-05 12:08:16 +05303125 if (adapter->pmac_id) {
3126 for (i = 0; i < (adapter->uc_macs + 1); i++)
3127 be_cmd_pmac_del(adapter, adapter->if_handle,
3128 adapter->pmac_id[i], 0);
3129 adapter->uc_macs = 0;
3130
3131 kfree(adapter->pmac_id);
3132 adapter->pmac_id = NULL;
3133 }
3134}
3135
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303136#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303137static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3138{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003139 struct net_device *netdev = adapter->netdev;
3140
Sathya Perlac9c47142014-03-27 10:46:19 +05303141 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3142 be_cmd_manage_iface(adapter, adapter->if_handle,
3143 OP_CONVERT_TUNNEL_TO_NORMAL);
3144
3145 if (adapter->vxlan_port)
3146 be_cmd_set_vxlan_port(adapter, 0);
3147
3148 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3149 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003150
3151 netdev->hw_enc_features = 0;
3152 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303153 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303154}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303155#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303156
Somnath Koturb05004a2013-12-05 12:08:16 +05303157static int be_clear(struct be_adapter *adapter)
3158{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303159 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003160
Sathya Perla11ac75e2011-12-13 00:58:50 +00003161 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003162 be_vf_clear(adapter);
3163
Vasundhara Volambec84e62014-06-30 13:01:32 +05303164 /* Re-configure FW to distribute resources evenly across max-supported
3165 * number of VFs, only when VFs are not already enabled.
3166 */
3167 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3168 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3169 pci_sriov_get_totalvfs(adapter->pdev));
3170
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303171#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303172 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303173#endif
Sathya Perla2d17f402013-07-23 15:25:04 +05303174 /* delete the primary mac along with the uc-mac list */
Somnath Koturb05004a2013-12-05 12:08:16 +05303175 be_mac_clear(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003176
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003177 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003178
Sathya Perla77071332013-08-27 16:57:34 +05303179 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003180
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003181 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303182 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003183 return 0;
3184}
3185
/* Create one FW interface (if_handle) per VF.
 * On non-BE3 chips the per-VF capability flags are first queried from the
 * FW profile; otherwise a default UNTAGGED|BROADCAST|MULTICAST set is used.
 * Returns 0 on success or the first FW command error (note: the err label
 * is also reached on the success path; status is 0 there).
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* vf + 1: FW domain 0 is the PF itself */
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3217
Sathya Perla39f1d942012-05-08 19:41:24 +00003218static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003219{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003220 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003221 int vf;
3222
Sathya Perla39f1d942012-05-08 19:41:24 +00003223 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3224 GFP_KERNEL);
3225 if (!adapter->vf_cfg)
3226 return -ENOMEM;
3227
Sathya Perla11ac75e2011-12-13 00:58:50 +00003228 for_all_vfs(adapter, vf_cfg, vf) {
3229 vf_cfg->if_handle = -1;
3230 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003231 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003232 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003233}
3234
/* Bring up SR-IOV virtual functions.
 * If VFs were already enabled (e.g. across a PF reload) their if_handles
 * and MACs are queried from FW; otherwise fresh interfaces and MAC
 * addresses are created, privileges/QoS/link-state are programmed, and
 * SR-IOV is enabled on the PCI device.
 * On any failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	/* Non-zero when VFs survived from a previous driver instance */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: just re-discover their state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3309
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303310/* Converting function_mode bits on BE3 to SH mc_type enums */
3311
3312static u8 be_convert_mc_type(u32 function_mode)
3313{
Suresh Reddy66064db2014-06-23 16:41:29 +05303314 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303315 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303316 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303317 return FLEX10;
3318 else if (function_mode & VNIC_MODE)
3319 return vNIC2;
3320 else if (function_mode & UMC_ENABLED)
3321 return UMC;
3322 else
3323 return MC_NONE;
3324}
3325
/* On BE2/BE3 FW does not suggest the supported limits, so derive them
 * here from chip type, multi-channel mode and function capabilities.
 * NOTE(review): res->max_rss_qs is only written on the RSS-capable path,
 * so the caller must pass a zero-initialized *res (be_get_resources does).
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3393
Sathya Perla30128032011-11-10 19:17:57 +00003394static void be_setup_init(struct be_adapter *adapter)
3395{
3396 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003397 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003398 adapter->if_handle = -1;
3399 adapter->be3_native = false;
3400 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003401 if (be_physfn(adapter))
3402 adapter->cmd_privileges = MAX_PRIVILEGES;
3403 else
3404 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003405}
3406
/* Discover the SR-IOV PF-pool resources and reconcile the num_vfs module
 * parameter against the HW limit and any VFs already enabled by a
 * previous driver instance. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI config-space TotalVFs value */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled take precedence over num_vfs */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3451
/* Populate adapter->res with the per-function resource limits:
 * derived locally for BE2/BE3, queried from FW for Lancer/Skyhawk.
 * Returns 0 on success or a FW command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3488
Sathya Perlad3d18312014-08-01 17:47:30 +05303489static void be_sriov_config(struct be_adapter *adapter)
3490{
3491 struct device *dev = &adapter->pdev->dev;
3492 int status;
3493
3494 status = be_get_sriov_config(adapter);
3495 if (status) {
3496 dev_err(dev, "Failed to query SR-IOV configuration\n");
3497 dev_err(dev, "SR-IOV cannot be enabled\n");
3498 return;
3499 }
3500
3501 /* When the HW is in SRIOV capable configuration, the PF-pool
3502 * resources are equally distributed across the max-number of
3503 * VFs. The user may request only a subset of the max-vfs to be
3504 * enabled. Based on num_vfs, redistribute the resources across
3505 * num_vfs so that each VF will have access to more number of
3506 * resources. This facility is not available in BE3 FW.
3507 * Also, this is done by FW in Lancer chip.
3508 */
3509 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3510 status = be_cmd_set_sriov_config(adapter,
3511 adapter->pool_res,
3512 adapter->num_vfs);
3513 if (status)
3514 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3515 }
3516}
3517
/* Query FW configuration (function mode/caps, active profile), set up
 * SR-IOV resource pools, read resource limits and allocate the pmac_id
 * table sized for the max unicast MACs.
 * Returns 0 on success, a FW command error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* SR-IOV config applies only to PFs on BE3 and later chips */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3551
Sathya Perla95046b92013-07-23 15:25:02 +05303552static int be_mac_setup(struct be_adapter *adapter)
3553{
3554 u8 mac[ETH_ALEN];
3555 int status;
3556
3557 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3558 status = be_cmd_get_perm_mac(adapter, mac);
3559 if (status)
3560 return status;
3561
3562 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3563 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3564 } else {
3565 /* Maybe the HW was reset; dev_addr must be re-programmed */
3566 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3567 }
3568
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003569 /* For BE3-R VFs, the PF programs the initial MAC address */
3570 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3571 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3572 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303573 return 0;
3574}
3575
/* Start the periodic (1s) worker and set the flag that tells
 * be_cancel_worker() there is pending work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3581
/* Create all HW queues (event, TX, RX-completion, MCC) and publish the
 * real ring counts to the network stack. Callers hold rtnl_lock for the
 * netif_set_real_num_*_queues() updates (see be_setup()).
 * Returns 0 on success; on failure logs and returns the error (caller is
 * expected to unwind via be_clear()/be_clear_queues()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3616
/* Tear down and re-create all queues (used when ring counts change):
 * close the netdev if up, stop the worker, drop MSI-x (unless vectors are
 * shared with RoCE), rebuild the queues, restart the worker and re-open.
 * Returns 0 on success or the first error encountered.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3652
/* Full adapter initialization: query FW config/resources, enable MSI-x,
 * create the FW interface and all queues, program the MAC, VLANs, RX
 * mode and flow control, bring up VFs if requested and start the worker.
 * On any fatal error everything is unwound via be_clear().
 * Returns 0 on success or the first fatal error.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags the interface actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Re-program flow control only if it differs from the FW setting */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3735
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-controller hook: re-arm every event queue and schedule its NAPI
 * context so completions are processed without a real interrupt
 * (netconsole / netpoll paths).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eq;
	int idx;

	for_all_evt_queues(adapter, eq, idx) {
		be_eq_notify(eq->adapter, eq->q.id, false, true, 0);
		napi_schedule(&eq->napi);
	}
}
#endif
3749
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303750static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003751
Sathya Perla306f1342011-08-02 19:57:45 +00003752static bool phy_flashing_required(struct be_adapter *adapter)
3753{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003754 return (adapter->phy.phy_type == TN_8022 &&
3755 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003756}
3757
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003758static bool is_comp_in_ufi(struct be_adapter *adapter,
3759 struct flash_section_info *fsec, int type)
3760{
3761 int i = 0, img_type = 0;
3762 struct flash_section_info_g2 *fsec_g2 = NULL;
3763
Sathya Perlaca34fe32012-11-06 17:48:56 +00003764 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003765 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3766
3767 for (i = 0; i < MAX_FLASH_COMP; i++) {
3768 if (fsec_g2)
3769 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3770 else
3771 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3772
3773 if (img_type == type)
3774 return true;
3775 }
3776 return false;
3777
3778}
3779
Jingoo Han4188e7d2013-08-05 18:02:02 +09003780static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303781 int header_size,
3782 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003783{
3784 struct flash_section_info *fsec = NULL;
3785 const u8 *p = fw->data;
3786
3787 p += header_size;
3788 while (p < (fw->data + fw->size)) {
3789 fsec = (struct flash_section_info *)p;
3790 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3791 return fsec;
3792 p += 32;
3793 }
3794 return NULL;
3795}
3796
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303797static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3798 u32 img_offset, u32 img_size, int hdr_size,
3799 u16 img_optype, bool *crc_match)
3800{
3801 u32 crc_offset;
3802 int status;
3803 u8 crc[4];
3804
3805 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3806 if (status)
3807 return status;
3808
3809 crc_offset = hdr_size + img_offset + img_size - 4;
3810
3811 /* Skip flashing, if crc of flashed region matches */
3812 if (!memcmp(crc, p + crc_offset, 4))
3813 *crc_match = true;
3814 else
3815 *crc_match = false;
3816
3817 return status;
3818}
3819
/* Write one firmware component to flash in 32KB chunks through the DMA
 * buffer in @flash_cmd. Intermediate chunks use a SAVE op; the final
 * chunk uses a FLASH op that commits the image (with PHY-specific
 * variants for OPTYPE_PHY_FW).
 * An ILLEGAL_REQUEST status for a PHY image is treated as "PHY flashing
 * not supported" and ends the loop without error.
 * Returns 0 on success or the FW command error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		/* Chunk size is capped at 32KB per FW command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			/* Last chunk: commit the accumulated image */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			/* More chunks to come: just stage this one */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3857
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003858/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003859static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303860 const struct firmware *fw,
3861 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003862{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003863 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303864 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003865 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303866 int status, i, filehdr_size, num_comp;
3867 const struct flash_comp *pflashcomp;
3868 bool crc_match;
3869 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003870
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003871 struct flash_comp gen3_flash_types[] = {
3872 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3873 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3874 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3875 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3876 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3877 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3878 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3879 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3880 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3881 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3882 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3883 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3884 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3885 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3886 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3887 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3888 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3889 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3890 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3891 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003892 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003893
3894 struct flash_comp gen2_flash_types[] = {
3895 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3896 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3897 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3898 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3899 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3900 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3901 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3902 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3903 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3904 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3905 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3906 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3907 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3908 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3909 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3910 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003911 };
3912
Sathya Perlaca34fe32012-11-06 17:48:56 +00003913 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003914 pflashcomp = gen3_flash_types;
3915 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003916 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003917 } else {
3918 pflashcomp = gen2_flash_types;
3919 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003920 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003921 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003922
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003923 /* Get flash section info*/
3924 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3925 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303926 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003927 return -1;
3928 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003929 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003930 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003931 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003932
3933 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3934 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3935 continue;
3936
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003937 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3938 !phy_flashing_required(adapter))
3939 continue;
3940
3941 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303942 status = be_check_flash_crc(adapter, fw->data,
3943 pflashcomp[i].offset,
3944 pflashcomp[i].size,
3945 filehdr_size +
3946 img_hdrs_size,
3947 OPTYPE_REDBOOT, &crc_match);
3948 if (status) {
3949 dev_err(dev,
3950 "Could not get CRC for 0x%x region\n",
3951 pflashcomp[i].optype);
3952 continue;
3953 }
3954
3955 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003956 continue;
3957 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003958
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303959 p = fw->data + filehdr_size + pflashcomp[i].offset +
3960 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003961 if (p + pflashcomp[i].size > fw->data + fw->size)
3962 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003963
3964 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303965 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003966 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303967 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003968 pflashcomp[i].img_type);
3969 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003970 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003971 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003972 return 0;
3973}
3974
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303975static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3976{
3977 u32 img_type = le32_to_cpu(fsec_entry.type);
3978 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3979
3980 if (img_optype != 0xFFFF)
3981 return img_optype;
3982
3983 switch (img_type) {
3984 case IMAGE_FIRMWARE_iSCSI:
3985 img_optype = OPTYPE_ISCSI_ACTIVE;
3986 break;
3987 case IMAGE_BOOT_CODE:
3988 img_optype = OPTYPE_REDBOOT;
3989 break;
3990 case IMAGE_OPTION_ROM_ISCSI:
3991 img_optype = OPTYPE_BIOS;
3992 break;
3993 case IMAGE_OPTION_ROM_PXE:
3994 img_optype = OPTYPE_PXE_BIOS;
3995 break;
3996 case IMAGE_OPTION_ROM_FCoE:
3997 img_optype = OPTYPE_FCOE_BIOS;
3998 break;
3999 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4000 img_optype = OPTYPE_ISCSI_BACKUP;
4001 break;
4002 case IMAGE_NCSI:
4003 img_optype = OPTYPE_NCSI_FW;
4004 break;
4005 case IMAGE_FLASHISM_JUMPVECTOR:
4006 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4007 break;
4008 case IMAGE_FIRMWARE_PHY:
4009 img_optype = OPTYPE_SH_PHY_FW;
4010 break;
4011 case IMAGE_REDBOOT_DIR:
4012 img_optype = OPTYPE_REDBOOT_DIR;
4013 break;
4014 case IMAGE_REDBOOT_CONFIG:
4015 img_optype = OPTYPE_REDBOOT_CONFIG;
4016 break;
4017 case IMAGE_UFI_DIR:
4018 img_optype = OPTYPE_UFI_DIR;
4019 break;
4020 default:
4021 break;
4022 }
4023
4024 return img_optype;
4025}
4026
/* Flash a Skyhawk UFI image: walk the flash-section entries of the FW file
 * and flash every component whose op-type is recognized, skipping components
 * whose on-card CRC already matches the image (i.e. unchanged sections).
 * Returns 0 on success or a negative errno.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		/* 0xFFFF optype in the entry marks an old-style FW image;
		 * be_get_img_optype() then derives the optype from img_type
		 */
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* Unrecognized section: nothing to flash for it */
		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Section already on the card is identical: skip it */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds check: section must lie fully inside the FW blob */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4108
/* Download a FW image to a Lancer chip in 32KB chunks via the write-object
 * command, commit it, and reset the adapter if the FW indicates a reset is
 * needed to activate the new image. Returns 0 on success or a negative errno.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW objects are written in 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image chunk by chunk; the FW reports how much it
	 * actually consumed in data_written, which drives the offsets.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written (zero-length write at final offset) */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	/* Activate the new FW: FW tells us whether it can be reset in place
	 * or whether a full server reboot is required.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4193
Sathya Perlaca34fe32012-11-06 17:48:56 +00004194#define UFI_TYPE2 2
4195#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004196#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004197#define UFI_TYPE4 4
4198static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004199 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004200{
Kalesh APddf11692014-07-17 16:20:28 +05304201 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004202 goto be_get_ufi_exit;
4203
Sathya Perlaca34fe32012-11-06 17:48:56 +00004204 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4205 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004206 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4207 if (fhdr->asic_type_rev == 0x10)
4208 return UFI_TYPE3R;
4209 else
4210 return UFI_TYPE3;
4211 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004212 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004213
4214be_get_ufi_exit:
4215 dev_err(&adapter->pdev->dev,
4216 "UFI and Interface are not compatible for flashing\n");
4217 return -1;
4218}
4219
/* Flash a non-Lancer (BE2/BE3/Skyhawk) UFI image: determine the UFI type
 * from its header and dispatch to the generation-specific flash routine.
 * Returns 0 on success or a negative errno.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every write-flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* Returns a UFI_TYPE* constant or -1 on UFI/adapter mismatch */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* Only image-id 1 entries are flashed here */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen-2 UFIs have no image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4288
4289int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4290{
4291 const struct firmware *fw;
4292 int status;
4293
4294 if (!netif_running(adapter->netdev)) {
4295 dev_err(&adapter->pdev->dev,
4296 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304297 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004298 }
4299
4300 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4301 if (status)
4302 goto fw_exit;
4303
4304 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4305
4306 if (lancer_chip(adapter))
4307 status = lancer_fw_download(adapter, fw);
4308 else
4309 status = be_fw_download(adapter, fw);
4310
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004311 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304312 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004313
Ajit Khaparde84517482009-09-04 03:12:16 +00004314fw_exit:
4315 release_firmware(fw);
4316 return status;
4317}
4318
/* ndo_bridge_setlink hook: parse the IFLA_BRIDGE_MODE attribute from the
 * netlink request and program the e-switch into VEB or VEPA forwarding mode.
 * Only meaningful when SR-IOV is enabled. Returns 0 on success or a
 * negative errno.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Attribute must be large enough to hold a u16 mode */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
	/* NOTE(review): if no IFLA_BRIDGE_MODE attribute is present the loop
	 * falls through here with status == 0, logging a failure but still
	 * returning success — confirm whether -EINVAL was intended.
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4364
4365static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304366 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004367{
4368 struct be_adapter *adapter = netdev_priv(dev);
4369 int status = 0;
4370 u8 hsw_mode;
4371
4372 if (!sriov_enabled(adapter))
4373 return 0;
4374
4375 /* BE and Lancer chips support VEB mode only */
4376 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4377 hsw_mode = PORT_FWD_TYPE_VEB;
4378 } else {
4379 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4380 adapter->if_handle, &hsw_mode);
4381 if (status)
4382 return 0;
4383 }
4384
4385 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4386 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004387 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4388 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004389}
4390
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304391#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004392/* VxLAN offload Notes:
4393 *
4394 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4395 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4396 * is expected to work across all types of IP tunnels once exported. Skyhawk
4397 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4398 * offloads in hw_enc_features only when a VxLAN port is added. Note this only
4399 * ensures that other tunnels work fine while VxLAN offloads are not enabled.
4400 *
4401 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4402 * adds more than one port, disable offloads and don't re-enable them again
4403 * until after all the tunnels are removed.
4404 */
/* VxLAN add-port notifier: enable HW VxLAN offloads for the first (and only
 * supported) UDP dport added by the stack. Adding a second port disables
 * offloads entirely until all ports are removed (see the notes above).
 * No-op on Lancer/BEx, which lack VxLAN offload support.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* A second port while offloads are active: disable offloads, but
	 * still count the port so del_vxlan_port bookkeeping stays balanced.
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads were already disabled by a previous multi-port add;
	 * just track the new port.
	 */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities only now that a port exists */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4453
4454static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4455 __be16 port)
4456{
4457 struct be_adapter *adapter = netdev_priv(netdev);
4458
4459 if (lancer_chip(adapter) || BEx_chip(adapter))
4460 return;
4461
4462 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004463 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304464
4465 be_disable_vxlan_offloads(adapter);
4466
4467 dev_info(&adapter->pdev->dev,
4468 "Disabled VxLAN offloads for UDP port %d\n",
4469 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004470done:
4471 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304472}
Joe Stringer725d5482014-11-13 16:38:13 -08004473
/* ndo_features_check hook: let the VxLAN helper strip offload features the
 * device cannot apply to this particular (possibly encapsulated) skb.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	return vxlan_features_check(skb, features);
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304480#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304481
/* net_device_ops table for be2net interfaces; installed by be_netdev_init().
 * Includes SR-IOV VF management hooks, bridge (e-switch) configuration, and
 * optional busy-poll / VxLAN-offload entries depending on kernel config.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
};
4512
/* One-time net_device setup: advertise offload feature flags, set device
 * flags, and wire up the netdev/ethtool operation tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads: SG, TSO, checksum, VLAN tag insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RSS hashing only makes sense with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-enabled features; VLAN RX strip/filter are always on
	 * and hence not user-toggleable (not in hw_features)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4539
4540static void be_unmap_pci_bars(struct be_adapter *adapter)
4541{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004542 if (adapter->csr)
4543 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004544 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004545 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004546}
4547
/* PCI BAR number holding the doorbell region: BAR 0 on Lancer and on
 * virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4555
4556static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004557{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004558 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004559 adapter->roce_db.size = 4096;
4560 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4561 db_bar(adapter));
4562 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4563 db_bar(adapter));
4564 }
Parav Pandit045508a2012-03-26 14:27:13 +00004565 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004566}
4567
/* Map the PCI BARs used by the driver: the CSR BAR (BEx physical functions
 * only), the doorbell BAR, and the RoCE doorbell info on Skyhawk.
 * Returns 0 on success or -ENOMEM, unmapping anything already mapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* Only BE2/BE3 PFs expose the CSR region on BAR 2 */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	/* Doorbell BAR number depends on chip/function (see db_bar()) */
	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4591
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004592static void be_ctrl_cleanup(struct be_adapter *adapter)
4593{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004594 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004595
4596 be_unmap_pci_bars(adapter);
4597
4598 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004599 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4600 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004601
Sathya Perla5b8821b2011-08-02 19:57:44 +00004602 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004603 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004604 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4605 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004606}
4607
/* One-time controller init: read the SLI identity from PCI config
 * space, map the BARs, allocate the mailbox and rx_filter DMA buffers
 * and initialize the locks used for mailbox/MCC command submission.
 * Returns 0 on success; on failure all partially acquired resources
 * are released via the goto-cleanup ladder below.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify SLI family and PF/VF role from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be
	 * 16-byte aligned below
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4666
4667static void be_stats_cleanup(struct be_adapter *adapter)
4668{
Sathya Perla3abcded2010-10-03 22:12:27 -07004669 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004670
4671 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004672 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4673 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004674}
4675
4676static int be_stats_init(struct be_adapter *adapter)
4677{
Sathya Perla3abcded2010-10-03 22:12:27 -07004678 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004679
Sathya Perlaca34fe32012-11-06 17:48:56 +00004680 if (lancer_chip(adapter))
4681 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4682 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004683 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004684 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004685 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004686 else
4687 /* ALL non-BE ASICs */
4688 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004689
Joe Perchesede23fa2013-08-26 22:45:23 -07004690 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4691 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304692 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304693 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004694 return 0;
4695}
4696
/* PCI remove callback: tear the adapter down in roughly the reverse
 * order of be_probe().  Also runs on driver unload for each device.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* Block further interrupts to ULPs before tearing down */
	be_intr_set(adapter, false);

	/* Stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4727
Sathya Perla39f1d942012-05-08 19:41:24 +00004728static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004729{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304730 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004731
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004732 status = be_cmd_get_cntl_attributes(adapter);
4733 if (status)
4734 return status;
4735
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004736 /* Must be a power of 2 or else MODULO will BUG_ON */
4737 adapter->be_get_temp_freq = 64;
4738
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304739 if (BEx_chip(adapter)) {
4740 level = be_cmd_get_fw_log_level(adapter);
4741 adapter->msg_enable =
4742 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4743 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004744
Sathya Perla92bf14a2013-08-27 16:57:32 +05304745 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004746 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004747}
4748
/* Attempt to recover a Lancer chip from a firmware error: wait for FW
 * to become ready again, destroy and re-create all HW resources, and
 * re-open the interface if it was running.  Returns 0 on success,
 * -EAGAIN if FW is still provisioning resources (caller may retry),
 * or another negative errno on failure.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Reset error state only after resources are torn down */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4785
/* Periodic (1s) worker that polls for HW errors and, on Lancer chips,
 * drives the recovery sequence.  Reschedules itself unless recovery
 * failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl to quiesce the stack during recovery */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4811
/* Periodic (1s) housekeeping worker: reaps MCC completions, refreshes
 * FW statistics and die temperature, replenishes starved RX queues and
 * updates the adaptive EQ delay.  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq iterations
	 * (a power of 2 — see be_get_initial_config) on the PF only
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4854
Sathya Perla257a3fe2013-06-14 15:54:51 +05304855/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004856static bool be_reset_required(struct be_adapter *adapter)
4857{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304858 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004859}
4860
Sathya Perlad3791422012-09-28 04:39:44 +00004861static char *mc_name(struct be_adapter *adapter)
4862{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304863 char *str = ""; /* default */
4864
4865 switch (adapter->mc_type) {
4866 case UMC:
4867 str = "UMC";
4868 break;
4869 case FLEX10:
4870 str = "FLEX10";
4871 break;
4872 case vNIC1:
4873 str = "vNIC-1";
4874 break;
4875 case nPAR:
4876 str = "nPAR";
4877 break;
4878 case UFP:
4879 str = "UFP";
4880 break;
4881 case vNIC2:
4882 str = "vNIC-2";
4883 break;
4884 default:
4885 str = "";
4886 }
4887
4888 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004889}
4890
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4895
/* PCI probe callback: bring up a newly discovered device — enable PCI,
 * map BARs, sync with firmware, create HW resources and register the
 * netdev.  On any failure the goto-cleanup ladder at the bottom undoes
 * everything done so far, in reverse order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA addressing; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER enablement is best-effort; probe continues on failure */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to on in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5018
/* PM suspend callback: arm wake-on-LAN if enabled, quiesce the netdev
 * and recovery worker, free HW resources and enter the target power
 * state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop error recovery before tearing down resources */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5043
/* PM resume callback: re-enable the device, wait for FW readiness,
 * re-create HW resources and restart the netdev and recovery worker.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are not
	 * checked here — resume appears to proceed best-effort; confirm
	 * this is intentional.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	/* Disarm wake-on-LAN now that we are fully awake */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5085
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop both periodic workers before touching the HW */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset stops all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5106
/* EEH/AER callback: a PCI channel error was detected.  Quiesce the
 * device (once — guarded by adapter->eeh_error) and tell the PCI core
 * whether a slot reset should be attempted or the device abandoned.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5145
/* EEH/AER slot-reset callback: re-enable the device after the reset
 * and verify the FW is ready before the core resumes operation.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5172
/* EEH/AER resume callback: re-create HW resources and restart traffic
 * after a successful slot reset.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5215
/* PCI error-recovery (EEH/AER) callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5221
/* PCI driver entry points for the benet driver */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5232
5233static int __init be_init_module(void)
5234{
Joe Perches8e95a202009-12-03 07:58:21 +00005235 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5236 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005237 printk(KERN_WARNING DRV_NAME
5238 " : Module param rx_frag_size must be 2048/4096/8192."
5239 " Using 2048\n");
5240 rx_frag_size = 2048;
5241 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005242
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005243 return pci_register_driver(&be_driver);
5244}
5245module_init(be_init_module);
5246
/* Module exit point: unregister the PCI driver, which invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);