blob: 8095c83c55ec1446cd519b9ecadffb530c2f2ea8 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
/* Number of PCI virtual functions to initialize (PF only); readable via
 * sysfs but not writable at runtime (S_IRUGO).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the HW; read-only module param. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
/* PCI IDs (BladeEngine and OneConnect families) this driver binds to. */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the UE (Unrecoverable Error) status-low register;
 * presumably indexed by bit position — confirm against the error-detection
 * code that walks this table.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
89/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070090static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000091 "LPCMEMHOST",
92 "MGMT_MAC",
93 "PCS0ONLINE",
94 "MPU_IRAM",
95 "PCS1ONLINE",
96 "PCTL0",
97 "PCTL1",
98 "PMEM",
99 "RR",
100 "TXPB",
101 "RXPP",
102 "XAUI",
103 "TXP",
104 "ARM",
105 "IPC",
106 "HOST2",
107 "HOST3",
108 "HOST4",
109 "HOST5",
110 "HOST6",
111 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530112 "ECRC",
113 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700114 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "PERIPH",
116 "LLTXULP",
117 "D2P",
118 "RCON",
119 "LDMA",
120 "LLTXP",
121 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000122 "Unknown"
123};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530128
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
/* Notify the HW that @posted new RX buffers are available on RQ @qid by
 * ringing the RQ doorbell.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Make sure descriptor writes reach memory before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
197
/* Notify the HW that @posted new TX work requests are queued on @txo by
 * ringing that TX object's TX-ULP doorbell.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Make sure descriptor writes reach memory before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
209
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * processed event entries and optionally re-arm the EQ and/or clear the
 * interrupt.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch the HW after an unrecoverable PCI (EEH) error */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
229
/* Ring the completion-queue doorbell for CQ @qid: acknowledge @num_popped
 * processed completions and optionally re-arm the CQ. Non-static: visible
 * to other translation units of this driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch the HW after an unrecoverable PCI (EEH) error */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
246
/* ndo_set_mac_address handler. Programs the new MAC via FW commands and
 * only commits it to netdev->dev_addr after the FW confirms (via
 * be_cmd_get_active_mac) that the new address is actually active — the
 * PMAC_ADD/DEL commands may legitimately fail on VFs lacking the FILTMGMT
 * privilege. Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
307
Sathya Perlaca34fe32012-11-06 17:48:56 +0000308/* BE2 supports only v0 cmd */
309static void *hw_stats_from_cmd(struct be_adapter *adapter)
310{
311 if (BE2_chip(adapter)) {
312 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000316 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500319 } else {
320 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
321
322 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323 }
324}
325
326/* BE2 supports only v0 cmd */
327static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
328{
329 if (BE2_chip(adapter)) {
330 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500337 } else {
338 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
339
340 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000341 }
342}
343
344static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000345{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
347 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
348 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000349 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000350 &rxf_stats->port[adapter->port_num];
351 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 drvs->rx_pause_frames = port_stats->rx_pause_frames;
355 drvs->rx_crc_errors = port_stats->rx_crc_errors;
356 drvs->rx_control_frames = port_stats->rx_control_frames;
357 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
358 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
359 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
360 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
361 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
362 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
363 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
364 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
365 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
366 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
367 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_dropped_header_too_small =
370 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000371 drvs->rx_address_filtered =
372 port_stats->rx_address_filtered +
373 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_alignment_symbol_errors =
375 port_stats->rx_alignment_symbol_errors;
376
377 drvs->tx_pauseframes = port_stats->tx_pauseframes;
378 drvs->tx_controlframes = port_stats->tx_controlframes;
379
380 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000381 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000382 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000383 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->forwarded_packets = rxf_stats->forwarded_packets;
387 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
389 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
391}
392
Sathya Perlaca34fe32012-11-06 17:48:56 +0000393static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000395 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
396 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
397 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000398 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000399 &rxf_stats->port[adapter->port_num];
400 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401
Sathya Perlaac124ff2011-07-25 19:10:14 +0000402 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000403 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
404 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 drvs->rx_pause_frames = port_stats->rx_pause_frames;
406 drvs->rx_crc_errors = port_stats->rx_crc_errors;
407 drvs->rx_control_frames = port_stats->rx_control_frames;
408 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
409 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
410 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
411 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
412 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
413 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
414 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
415 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
416 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
417 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
418 drvs->rx_dropped_header_too_small =
419 port_stats->rx_dropped_header_too_small;
420 drvs->rx_input_fifo_overflow_drop =
421 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000422 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000423 drvs->rx_alignment_symbol_errors =
424 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000425 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426 drvs->tx_pauseframes = port_stats->tx_pauseframes;
427 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000428 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000429 drvs->jabber_events = port_stats->jabber_events;
430 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->forwarded_packets = rxf_stats->forwarded_packets;
433 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000434 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
435 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
437}
438
Ajit Khaparde61000862013-10-03 16:16:33 -0500439static void populate_be_v2_stats(struct be_adapter *adapter)
440{
441 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
442 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
443 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
444 struct be_port_rxf_stats_v2 *port_stats =
445 &rxf_stats->port[adapter->port_num];
446 struct be_drv_stats *drvs = &adapter->drv_stats;
447
448 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
449 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
450 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
451 drvs->rx_pause_frames = port_stats->rx_pause_frames;
452 drvs->rx_crc_errors = port_stats->rx_crc_errors;
453 drvs->rx_control_frames = port_stats->rx_control_frames;
454 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
455 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
456 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
457 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
458 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
459 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
460 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
461 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
462 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
463 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
464 drvs->rx_dropped_header_too_small =
465 port_stats->rx_dropped_header_too_small;
466 drvs->rx_input_fifo_overflow_drop =
467 port_stats->rx_input_fifo_overflow_drop;
468 drvs->rx_address_filtered = port_stats->rx_address_filtered;
469 drvs->rx_alignment_symbol_errors =
470 port_stats->rx_alignment_symbol_errors;
471 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
472 drvs->tx_pauseframes = port_stats->tx_pauseframes;
473 drvs->tx_controlframes = port_stats->tx_controlframes;
474 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
475 drvs->jabber_events = port_stats->jabber_events;
476 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
477 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
478 drvs->forwarded_packets = rxf_stats->forwarded_packets;
479 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
480 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
481 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
482 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530483 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500484 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
485 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
486 drvs->rx_roce_frames = port_stats->roce_frames_received;
487 drvs->roce_drops_crc = port_stats->roce_drops_crc;
488 drvs->roce_drops_payload_len =
489 port_stats->roce_drops_payload_len;
490 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500491}
492
Selvin Xavier005d5692011-05-16 07:36:35 +0000493static void populate_lancer_stats(struct be_adapter *adapter)
494{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000495
Selvin Xavier005d5692011-05-16 07:36:35 +0000496 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000527 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000528 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
/* Fold a 16-bit HW counter reading @val into the 32-bit SW accumulator
 * @acc. The HW counter wraps at 65535: if the new reading is smaller than
 * the low 16 bits of the accumulator, one wrap is assumed to have occurred
 * since the last read and 65536 is added. The final store uses ACCESS_ONCE
 * so concurrent readers see a single consistent write.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
/* Parse the FW stats response (whose layout depends on the chip family)
 * into the driver's unified stats, then fold in the per-RX-queue erx
 * drop counters for non-Lancer chips.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
581
Sathya Perlaab1594e2011-07-25 19:10:15 +0000582static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530583 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700584{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000585 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000586 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700587 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000588 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000589 u64 pkts, bytes;
590 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700591 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592
Sathya Perla3abcded2010-10-03 22:12:27 -0700593 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000594 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530595
Sathya Perlaab1594e2011-07-25 19:10:15 +0000596 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700597 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000598 pkts = rx_stats(rxo)->rx_pkts;
599 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700600 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000601 stats->rx_packets += pkts;
602 stats->rx_bytes += bytes;
603 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
604 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
605 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700606 }
607
Sathya Perla3c8def92011-06-12 20:01:58 +0000608 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000609 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530610
Sathya Perlaab1594e2011-07-25 19:10:15 +0000611 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700612 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000613 pkts = tx_stats(txo)->tx_pkts;
614 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700615 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000616 stats->tx_packets += pkts;
617 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000618 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700619
620 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000621 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000622 drvs->rx_alignment_symbol_errors +
623 drvs->rx_in_range_errors +
624 drvs->rx_out_range_errors +
625 drvs->rx_frame_too_long +
626 drvs->rx_dropped_too_small +
627 drvs->rx_dropped_too_short +
628 drvs->rx_dropped_header_too_small +
629 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000630 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700631
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000633 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000636
Sathya Perlaab1594e2011-07-25 19:10:15 +0000637 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700638
639 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000640 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000641
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642 /* receiver fifo overrun */
643 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000644 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000645 drvs->rx_input_fifo_overflow_drop +
646 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000647 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla3c8def92011-06-12 20:01:58 +0000665static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530666 u32 wrb_cnt, u32 copied, u32 gso_segs,
667 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668{
Sathya Perla3c8def92011-06-12 20:01:58 +0000669 struct be_tx_stats *stats = tx_stats(txo);
670
Sathya Perlaab1594e2011-07-25 19:10:15 +0000671 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt;
674 stats->tx_bytes += copied;
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000677 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000678 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
681/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000682static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530683 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700685 int cnt = (skb->len > skb->data_len);
686
687 cnt += skb_shinfo(skb)->nr_frags;
688
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* to account for hdr wrb */
690 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000697 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
704 wrb->frag_pa_hi = upper_32_bits(addr);
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000707 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708}
709
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530711 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000712{
713 u8 vlan_prio;
714 u16 vlan_tag;
715
716 vlan_tag = vlan_tx_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
720 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
721 adapter->recommended_prio;
722
723 return vlan_tag;
724}
725
Sathya Perlac9c47142014-03-27 10:46:19 +0530726/* Used only for IP tunnel packets */
727static u16 skb_inner_ip_proto(struct sk_buff *skb)
728{
729 return (inner_ip_hdr(skb)->version == 4) ?
730 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
731}
732
733static u16 skb_ip_proto(struct sk_buff *skb)
734{
735 return (ip_hdr(skb)->version == 4) ?
736 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
737}
738
Somnath Koturcc4ce022010-10-21 07:11:14 -0700739static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Sathya Perla748b5392014-05-09 13:29:13 +0530740 struct sk_buff *skb, u32 wrb_cnt, u32 len,
741 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742{
Sathya Perlac9c47142014-03-27 10:46:19 +0530743 u16 vlan_tag, proto;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700744
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700745 memset(hdr, 0, sizeof(*hdr));
746
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530747 SET_TX_WRB_HDR_BITS(crc, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700748
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000749 if (skb_is_gso(skb)) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530750 SET_TX_WRB_HDR_BITS(lso, hdr, 1);
751 SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000752 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530753 SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700754 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530755 if (skb->encapsulation) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530756 SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530757 proto = skb_inner_ip_proto(skb);
758 } else {
759 proto = skb_ip_proto(skb);
760 }
761 if (proto == IPPROTO_TCP)
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530762 SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530763 else if (proto == IPPROTO_UDP)
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530764 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765 }
766
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700767 if (vlan_tx_tag_present(skb)) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530768 SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000769 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530770 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 }
772
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000773 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530774 SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
775 SET_TX_WRB_HDR_BITS(event, hdr, 1);
776 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
777 SET_TX_WRB_HDR_BITS(len, hdr, len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700778}
779
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000780static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530781 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000782{
783 dma_addr_t dma;
784
785 be_dws_le_to_cpu(wrb, sizeof(*wrb));
786
787 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000788 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000789 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000790 dma_unmap_single(dev, dma, wrb->frag_len,
791 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000792 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000793 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000794 }
795}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700796
Sathya Perla3c8def92011-06-12 20:01:58 +0000797static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla748b5392014-05-09 13:29:13 +0530798 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
799 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700800{
Sathya Perla7101e112010-03-22 20:41:12 +0000801 dma_addr_t busaddr;
802 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000803 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700804 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700805 struct be_eth_wrb *wrb;
806 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000807 bool map_single = false;
808 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700809
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700810 hdr = queue_head_node(txq);
811 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000812 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700813
David S. Millerebc8d2a2009-06-09 01:01:31 -0700814 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700815 int len = skb_headlen(skb);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530816
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000817 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
818 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000819 goto dma_err;
820 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700821 wrb = queue_head_node(txq);
822 wrb_fill(wrb, busaddr, len);
823 be_dws_cpu_to_le(wrb, sizeof(*wrb));
824 queue_head_inc(txq);
825 copied += len;
826 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700827
David S. Millerebc8d2a2009-06-09 01:01:31 -0700828 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Sathya Perla748b5392014-05-09 13:29:13 +0530829 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
Kalesh AP03d28ff2014-09-19 15:46:56 +0530830
Ian Campbellb061b392011-08-29 23:18:23 +0000831 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000832 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000833 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000834 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700835 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000836 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700837 be_dws_cpu_to_le(wrb, sizeof(*wrb));
838 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000839 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 }
841
842 if (dummy_wrb) {
843 wrb = queue_head_node(txq);
844 wrb_fill(wrb, 0, 0);
845 be_dws_cpu_to_le(wrb, sizeof(*wrb));
846 queue_head_inc(txq);
847 }
848
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000849 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850 be_dws_cpu_to_le(hdr, sizeof(*hdr));
851
852 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000853dma_err:
854 txq->head = map_head;
855 while (copied) {
856 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000857 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000858 map_single = false;
859 copied -= wrb->frag_len;
Vasundhara Volamd3de1542014-09-02 09:56:50 +0530860 adapter->drv_stats.dma_map_errors++;
Sathya Perla7101e112010-03-22 20:41:12 +0000861 queue_head_inc(txq);
862 }
863 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864}
865
Somnath Kotur93040ae2012-06-26 22:32:10 +0000866static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000867 struct sk_buff *skb,
868 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000869{
870 u16 vlan_tag = 0;
871
872 skb = skb_share_check(skb, GFP_ATOMIC);
873 if (unlikely(!skb))
874 return skb;
875
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000876 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000877 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530878
879 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
880 if (!vlan_tag)
881 vlan_tag = adapter->pvid;
882 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
883 * skip VLAN insertion
884 */
885 if (skip_hw_vlan)
886 *skip_hw_vlan = true;
887 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000888
889 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400890 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000891 if (unlikely(!skb))
892 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000893 skb->vlan_tci = 0;
894 }
895
896 /* Insert the outer VLAN, if any */
897 if (adapter->qnq_vid) {
898 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400899 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000900 if (unlikely(!skb))
901 return skb;
902 if (skip_hw_vlan)
903 *skip_hw_vlan = true;
904 }
905
Somnath Kotur93040ae2012-06-26 22:32:10 +0000906 return skb;
907}
908
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000909static bool be_ipv6_exthdr_check(struct sk_buff *skb)
910{
911 struct ethhdr *eh = (struct ethhdr *)skb->data;
912 u16 offset = ETH_HLEN;
913
914 if (eh->h_proto == htons(ETH_P_IPV6)) {
915 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
916
917 offset += sizeof(struct ipv6hdr);
918 if (ip6h->nexthdr != NEXTHDR_TCP &&
919 ip6h->nexthdr != NEXTHDR_UDP) {
920 struct ipv6_opt_hdr *ehdr =
921 (struct ipv6_opt_hdr *) (skb->data + offset);
922
923 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
924 if (ehdr->hdrlen == 0xff)
925 return true;
926 }
927 }
928 return false;
929}
930
931static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
932{
933 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
934}
935
Sathya Perla748b5392014-05-09 13:29:13 +0530936static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000937{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000938 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000939}
940
Vasundhara Volamec495fa2014-03-03 14:25:38 +0530941static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
942 struct sk_buff *skb,
943 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700944{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000945 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000946 unsigned int eth_hdr_len;
947 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000948
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000949 /* For padded packets, BE HW modifies tot_len field in IP header
950 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000951 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000952 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000953 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
954 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000955 if (skb->len <= 60 &&
956 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000957 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000958 ip = (struct iphdr *)ip_hdr(skb);
959 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
960 }
961
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000962 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530963 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000964 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530965 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000966 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perla748b5392014-05-09 13:29:13 +0530967 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000968
Somnath Kotur93040ae2012-06-26 22:32:10 +0000969 /* HW has a bug wherein it will calculate CSUM for VLAN
970 * pkts even though it is disabled.
971 * Manually insert VLAN in pkt.
972 */
973 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000974 vlan_tx_tag_present(skb)) {
975 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000976 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +0530977 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000978 }
979
980 /* HW may lockup when VLAN HW tagging is requested on
981 * certain ipv6 packets. Drop such pkts if the HW workaround to
982 * skip HW tagging is not enabled by FW.
983 */
984 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000985 (adapter->pvid || adapter->qnq_vid) &&
986 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000987 goto tx_drop;
988
989 /* Manual VLAN tag insertion to prevent:
990 * ASIC lockup when the ASIC inserts VLAN tag into
991 * certain ipv6 packets. Insert VLAN tags in driver,
992 * and set event, completion, vlan bits accordingly
993 * in the Tx WRB.
994 */
995 if (be_ipv6_tx_stall_chk(adapter, skb) &&
996 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000997 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000998 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +0530999 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001000 }
1001
Sathya Perlaee9c7992013-05-22 23:04:55 +00001002 return skb;
1003tx_drop:
1004 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301005err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001006 return NULL;
1007}
1008
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301009static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1010 struct sk_buff *skb,
1011 bool *skip_hw_vlan)
1012{
1013 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1014 * less may cause a transmit stall on that port. So the work-around is
1015 * to pad short packets (<= 32 bytes) to a 36-byte length.
1016 */
1017 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1018 if (skb_padto(skb, 36))
1019 return NULL;
1020 skb->len = 36;
1021 }
1022
1023 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1024 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1025 if (!skb)
1026 return NULL;
1027 }
1028
1029 return skb;
1030}
1031
Sathya Perlaee9c7992013-05-22 23:04:55 +00001032static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1033{
1034 struct be_adapter *adapter = netdev_priv(netdev);
1035 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1036 struct be_queue_info *txq = &txo->q;
1037 bool dummy_wrb, stopped = false;
1038 u32 wrb_cnt = 0, copied = 0;
1039 bool skip_hw_vlan = false;
1040 u32 start = txq->head;
1041
1042 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
Sathya Perlabc617522013-10-01 16:00:01 +05301043 if (!skb) {
1044 tx_stats(txo)->tx_drv_drops++;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001045 return NETDEV_TX_OK;
Sathya Perlabc617522013-10-01 16:00:01 +05301046 }
Sathya Perlaee9c7992013-05-22 23:04:55 +00001047
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001048 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001049
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001050 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1051 skip_hw_vlan);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001052 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001053 int gso_segs = skb_shinfo(skb)->gso_segs;
1054
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001055 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +00001056 BUG_ON(txo->sent_skb_list[start]);
1057 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001059 /* Ensure txq has space for the next skb; Else stop the queue
1060 * *BEFORE* ringing the tx doorbell, so that we serialze the
1061 * tx compls of the current transmit which'll wake up the queue
1062 */
Sathya Perla7101e112010-03-22 20:41:12 +00001063 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001064 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1065 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +00001066 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001067 stopped = true;
1068 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001069
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001070 be_txq_notify(adapter, txo, wrb_cnt);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001071
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001072 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001073 } else {
1074 txq->head = start;
Sathya Perlabc617522013-10-01 16:00:01 +05301075 tx_stats(txo)->tx_drv_drops++;
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001076 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 return NETDEV_TX_OK;
1079}
1080
1081static int be_change_mtu(struct net_device *netdev, int new_mtu)
1082{
1083 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301084 struct device *dev = &adapter->pdev->dev;
1085
1086 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1087 dev_info(dev, "MTU must be between %d and %d bytes\n",
1088 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001089 return -EINVAL;
1090 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301091
1092 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301093 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094 netdev->mtu = new_mtu;
1095 return 0;
1096}
1097
1098/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001099 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1100 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001101 */
Sathya Perla10329df2012-06-05 19:37:18 +00001102static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001103{
Vasundhara Volam50762662014-09-12 17:39:14 +05301104 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001105 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301106 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001107 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001108
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001109 /* No need to further configure vids if in promiscuous mode */
1110 if (adapter->promiscuous)
1111 return 0;
1112
Sathya Perla92bf14a2013-08-27 16:57:32 +05301113 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001114 goto set_vlan_promisc;
1115
1116 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301117 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1118 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001119
Kalesh AP4d567d92014-05-09 13:29:17 +05301120 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001121 if (status) {
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001122 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP4c600052014-05-30 19:06:26 +05301123 if (addl_status(status) ==
1124 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001125 goto set_vlan_promisc;
Vasundhara Volam50762662014-09-12 17:39:14 +05301126 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001127 } else {
1128 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1129 /* hw VLAN filtering re-enabled. */
1130 status = be_cmd_rx_filter(adapter,
1131 BE_FLAGS_VLAN_PROMISC, OFF);
1132 if (!status) {
Vasundhara Volam50762662014-09-12 17:39:14 +05301133 dev_info(dev,
1134 "Disabling VLAN Promiscuous mode\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001135 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001136 }
1137 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001139
Sathya Perlab31c50a2009-09-17 10:30:13 -07001140 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001141
1142set_vlan_promisc:
Somnath Kotura6b74e02014-01-21 15:50:55 +05301143 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1144 return 0;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001145
1146 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1147 if (!status) {
Vasundhara Volam50762662014-09-12 17:39:14 +05301148 dev_info(dev, "Enable VLAN Promiscuous mode\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001149 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1150 } else
Vasundhara Volam50762662014-09-12 17:39:14 +05301151 dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001152 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153}
1154
Patrick McHardy80d5c362013-04-19 02:04:28 +00001155static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001156{
1157 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001158 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001160 /* Packets with VID 0 are always received by Lancer by default */
1161 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301162 return status;
1163
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301164 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301165 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001166
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301167 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301168 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001169
Somnath Kotura6b74e02014-01-21 15:50:55 +05301170 status = be_vid_config(adapter);
1171 if (status) {
1172 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301173 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301174 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301175
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001176 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177}
1178
Patrick McHardy80d5c362013-04-19 02:04:28 +00001179static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180{
1181 struct be_adapter *adapter = netdev_priv(netdev);
1182
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001183 /* Packets with VID 0 are always received by Lancer by default */
1184 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301185 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001186
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301187 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301188 adapter->vlans_added--;
1189
1190 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001191}
1192
Somnath kotur7ad09452014-03-03 14:24:43 +05301193static void be_clear_promisc(struct be_adapter *adapter)
1194{
1195 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301196 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301197
1198 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1199}
1200
Sathya Perlaa54769f2011-10-24 02:45:00 +00001201static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202{
1203 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001204 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205
1206 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001207 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001208 adapter->promiscuous = true;
1209 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001210 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001211
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001212 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001213 if (adapter->promiscuous) {
Somnath kotur7ad09452014-03-03 14:24:43 +05301214 be_clear_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001215 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001216 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001217 }
1218
Sathya Perlae7b909a2009-11-22 22:01:10 +00001219 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001220 if (netdev->flags & IFF_ALLMULTI ||
Kalesh APa0794882014-05-30 19:06:23 +05301221 netdev_mc_count(netdev) > be_max_mc(adapter))
1222 goto set_mcast_promisc;
Sathya Perla24307ee2009-06-18 00:09:25 +00001223
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001224 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1225 struct netdev_hw_addr *ha;
1226 int i = 1; /* First slot is claimed by the Primary MAC */
1227
1228 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1229 be_cmd_pmac_del(adapter, adapter->if_handle,
1230 adapter->pmac_id[i], 0);
1231 }
1232
Sathya Perla92bf14a2013-08-27 16:57:32 +05301233 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001234 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1235 adapter->promiscuous = true;
1236 goto done;
1237 }
1238
1239 netdev_for_each_uc_addr(ha, adapter->netdev) {
1240 adapter->uc_macs++; /* First slot is for Primary MAC */
1241 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1242 adapter->if_handle,
1243 &adapter->pmac_id[adapter->uc_macs], 0);
1244 }
1245 }
1246
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001247 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Kalesh APa0794882014-05-30 19:06:23 +05301248 if (!status) {
1249 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1250 adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
1251 goto done;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001252 }
Kalesh APa0794882014-05-30 19:06:23 +05301253
1254set_mcast_promisc:
1255 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1256 return;
1257
1258 /* Set to MCAST promisc mode if setting MULTICAST address fails
1259 * or if num configured exceeds what we support
1260 */
1261 status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1262 if (!status)
1263 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
Sathya Perla24307ee2009-06-18 00:09:25 +00001264done:
1265 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001266}
1267
/* ndo_set_vf_mac handler: program a new MAC address for VF @vf.
 * On BEx chips the old pmac entry must be deleted before the new one is
 * added; on newer chips a single SET_MAC firmware cmd does the swap.
 * Returns 0 on success or a negative errno (via be_cmd_status()).
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace = delete old pmac entry, then add the new one */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the active MAC only after FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1307
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001308static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301309 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001310{
1311 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001312 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001313
Sathya Perla11ac75e2011-12-13 00:58:50 +00001314 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001315 return -EPERM;
1316
Sathya Perla11ac75e2011-12-13 00:58:50 +00001317 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001318 return -EINVAL;
1319
1320 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001321 vi->max_tx_rate = vf_cfg->tx_rate;
1322 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001323 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1324 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001325 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301326 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001327
1328 return 0;
1329}
1330
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF @vf.
 * A non-zero @vlan/@qos programs the hybrid-switch table with the combined
 * tag; vlan == 0 && qos == 0 resets transparent tagging.
 * Returns 0 on success or a negative errno (via be_cmd_status()).
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	/* vlan must fit in 12 bits, qos (priority) in 3 bits */
	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* Skip the FW cmd if the same tag is already programmed */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* Cache the tag only after FW accepted it */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1365
/* ndo_set_vf_rate handler: program a TX rate-limit (QoS) for VF @vf.
 * Only a maximum rate is supported; @min_tx_rate must be 0.
 * max_tx_rate == 0 removes the limit. A non-zero rate is validated
 * against the current link speed and, on Skyhawk, must be an exact
 * percentage of it. Returns 0 or a negative errno (via be_cmd_status()).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* HW supports only a max-rate cap; a min guarantee is not possible */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 clears the limit; no link-speed validation needed */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the rate only after FW accepted it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301427static int be_set_vf_link_state(struct net_device *netdev, int vf,
1428 int link_state)
1429{
1430 struct be_adapter *adapter = netdev_priv(netdev);
1431 int status;
1432
1433 if (!sriov_enabled(adapter))
1434 return -EPERM;
1435
1436 if (vf >= adapter->num_vfs)
1437 return -EINVAL;
1438
1439 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301440 if (status) {
1441 dev_err(&adapter->pdev->dev,
1442 "Link state change on VF %d failed: %#x\n", vf, status);
1443 return be_cmd_status(status);
1444 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301445
Kalesh APabccf232014-07-17 16:20:24 +05301446 adapter->vf_cfg[vf].plink_tracking = link_state;
1447
1448 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301449}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001450
Sathya Perla2632baf2013-10-01 16:00:00 +05301451static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1452 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453{
Sathya Perla2632baf2013-10-01 16:00:00 +05301454 aic->rx_pkts_prev = rx_pkts;
1455 aic->tx_reqs_prev = tx_pkts;
1456 aic->jiffies = now;
1457}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001458
/* Adaptive interrupt coalescing: recompute the event-queue delay (eqd)
 * for every EQ from the rx+tx packet rate observed since the last run,
 * and push all changed delays to FW in one MODIFY_EQ_DELAY command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: use the static, user-configured delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Snapshot the u64 counters consistently; retry if a writer
		 * raced us (matters on 32-bit where u64 reads can tear)
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* packets/sec since the last sample, scaled to an eqd and
		 * clamped to this queue's [min_eqd, max_eqd] bounds
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Batch only EQs whose delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1524
Sathya Perla3abcded2010-10-03 22:12:27 -07001525static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301526 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001527{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001528 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001529
Sathya Perlaab1594e2011-07-25 19:10:15 +00001530 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001531 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001532 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001533 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001534 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001535 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001536 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001537 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001538 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001539}
1540
Sathya Perla2e588f82011-03-11 02:49:26 +00001541static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001542{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001543 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301544 * Also ignore ipcksm for ipv6 pkts
1545 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001546 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301547 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001548}
1549
/* Consume the RX descriptor at the queue tail and return its page-info.
 * DMA handling depends on fragment position within the (shared) page:
 * the last fragment triggers a full dma_unmap_page() of the big page,
 * any earlier fragment only needs a CPU-sync of its rx_frag_size slice.
 * Also advances the queue tail and drops the ring's used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1575
1576/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001577static void be_rx_compl_discard(struct be_rx_obj *rxo,
1578 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001580 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001581 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001583 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301584 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001585 put_page(page_info->page);
1586 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587 }
1588}
1589
1590/*
1591 * skb_fill_rx_data forms a complete skb for an ether frame
1592 * indicated by rxcp.
1593 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001594static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1595 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001596{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001598 u16 i, j;
1599 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001600 u8 *start;
1601
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301602 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001603 start = page_address(page_info->page) + page_info->page_offset;
1604 prefetch(start);
1605
1606 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001607 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001608
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001609 skb->len = curr_frag_len;
1610 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001611 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612 /* Complete packet has now been moved to data */
1613 put_page(page_info->page);
1614 skb->data_len = 0;
1615 skb->tail += curr_frag_len;
1616 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001617 hdr_len = ETH_HLEN;
1618 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001619 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001620 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001621 skb_shinfo(skb)->frags[0].page_offset =
1622 page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05301623 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1624 curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001625 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001626 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627 skb->tail += hdr_len;
1628 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001629 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630
Sathya Perla2e588f82011-03-11 02:49:26 +00001631 if (rxcp->pkt_size <= rx_frag_size) {
1632 BUG_ON(rxcp->num_rcvd != 1);
1633 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001634 }
1635
1636 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001637 remaining = rxcp->pkt_size - curr_frag_len;
1638 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301639 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001640 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001641
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001642 /* Coalesce all frags from the same physical page in one slot */
1643 if (page_info->page_offset == 0) {
1644 /* Fresh page */
1645 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001646 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001647 skb_shinfo(skb)->frags[j].page_offset =
1648 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001649 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001650 skb_shinfo(skb)->nr_frags++;
1651 } else {
1652 put_page(page_info->page);
1653 }
1654
Eric Dumazet9e903e02011-10-18 21:00:24 +00001655 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001656 skb->len += curr_frag_len;
1657 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001658 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001659 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001660 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001661 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001662 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001663}
1664
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001665/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301666static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001667 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001668{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001669 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001670 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001671 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001672
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001673 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001674 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001675 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001676 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677 return;
1678 }
1679
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001680 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001682 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001683 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001684 else
1685 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001687 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001688 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001689 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001690 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301691
Tom Herbertb6c0e892014-08-27 21:27:17 -07001692 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301693 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001694
Jiri Pirko343e43c2011-08-25 02:50:51 +00001695 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001696 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001697
1698 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699}
1700
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001701/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001702static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1703 struct napi_struct *napi,
1704 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001705{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001706 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001707 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001708 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00001709 u16 remaining, curr_frag_len;
1710 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001711
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001712 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001713 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001714 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001715 return;
1716 }
1717
Sathya Perla2e588f82011-03-11 02:49:26 +00001718 remaining = rxcp->pkt_size;
1719 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301720 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001721
1722 curr_frag_len = min(remaining, rx_frag_size);
1723
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001724 /* Coalesce all frags from the same physical page in one slot */
1725 if (i == 0 || page_info->page_offset == 0) {
1726 /* First frag or Fresh page */
1727 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001728 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001729 skb_shinfo(skb)->frags[j].page_offset =
1730 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001731 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001732 } else {
1733 put_page(page_info->page);
1734 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001735 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001736 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001737 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001738 memset(page_info, 0, sizeof(*page_info));
1739 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001740 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001741
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001742 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001743 skb->len = rxcp->pkt_size;
1744 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001745 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001746 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001747 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001748 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301749
Tom Herbertb6c0e892014-08-27 21:27:17 -07001750 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301751 skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001752
Jiri Pirko343e43c2011-08-25 02:50:51 +00001753 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001754 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001755
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001756 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757}
1758
/* Extract the fields of a v1 (BE3-native) RX completion entry into the
 * chip-independent rxcp representation used by the rest of the rx path.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are valid only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001781
/* Extract the fields of a v0 (legacy) RX completion entry into the
 * chip-independent rxcp representation; v0 reports ip_frag instead of
 * the tunneled bit available in v1.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are valid only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1803
1804static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1805{
1806 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1807 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1808 struct be_adapter *adapter = rxo->adapter;
1809
1810 /* For checking the valid bit it is Ok to use either definition as the
1811 * valid bit is at the same position in both v0 and v1 Rx compl */
1812 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001813 return NULL;
1814
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001815 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001816 be_dws_le_to_cpu(compl, sizeof(*compl));
1817
1818 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001819 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001820 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001821 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001822
Somnath Koture38b1702013-05-29 22:55:56 +00001823 if (rxcp->ip_frag)
1824 rxcp->l4_csum = 0;
1825
Sathya Perla15d72182011-03-21 20:49:26 +00001826 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301827 /* In QNQ modes, if qnq bit is not set, then the packet was
1828 * tagged only with the transparent outer vlan-tag and must
1829 * not be treated as a vlan packet by host
1830 */
1831 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
Sathya Perla15d72182011-03-21 20:49:26 +00001832 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001833
Sathya Perla15d72182011-03-21 20:49:26 +00001834 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001835 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001836
Somnath Kotur939cf302011-08-18 21:51:49 -07001837 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301838 !test_bit(rxcp->vlan_tag, adapter->vids))
Sathya Perla15d72182011-03-21 20:49:26 +00001839 rxcp->vlanf = 0;
1840 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001841
1842 /* As the compl has been parsed, reset it; we wont touch it again */
1843 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844
Sathya Perla3abcded2010-10-03 22:12:27 -07001845 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846 return rxcp;
1847}
1848
Eric Dumazet1829b082011-03-01 05:48:12 +00001849static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001850{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001852
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001854 gfp |= __GFP_COMP;
1855 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856}
1857
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Post at most @frags_needed frags; stop early when we wrap around
	 * to a slot that still holds a posted buffer (page != NULL)
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page" to carve frags out of */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the same big page: each posted
			 * frag holds its own reference on the page
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* The last frag of a page carries the dma handle
			 * used later to unmap the whole page
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 frags per write */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1940
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * pending.  The entry is byte-swapped in place, its valid bit cleared
 * and the CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Consume the entry so it is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1956
/* Unmap and free the wrbs (and the skb) of one transmitted packet whose
 * last wrb sits at @last_index in the TXQ; the TXQ tail is advanced past
 * all of them.
 *
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit them back to the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb-header mapping only for the first data wrb */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1988
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume EQ entries until an unwritten (evt == 0) one is found */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Process the entry only after evt is seen non-zero */
		rmb();
		eqe->evt = 0;	/* clear so the slot can be reused by HW */
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2008
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002009/* Leaves the EQ is disarmed state */
2010static void be_eq_clean(struct be_eq_obj *eqo)
2011{
2012 int num = events_get(eqo);
2013
2014 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2015}
2016
/* Drain the RX CQ of @rxo and release all posted-but-unused RX buffers.
 * Used on queue teardown; leaves the CQ unarmed and the RXQ empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when HW is in error state */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2065
/* Reap all outstanding TX completions on every TX queue and then forcibly
 * free any posted skbs whose completions will never arrive.
 * Used on interface teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress made: restart the silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb to walk it */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2123
/* Destroy all event queues: drain pending events, destroy the HW queue,
 * tear down the associated NAPI context, then free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Queue memory is freed even if the HW queue was never
		 * created (partial setup failure)
		 */
		be_queue_free(adapter, &eqo->q);
	}
}
2139
/* Allocate and create the event queues (one per interrupt vector, capped
 * by the configured number of queues) and register a NAPI context for each.
 *
 * Returns 0 on success or a negative status; on failure the caller is
 * expected to clean up via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2172
Sathya Perla5fb379e2009-06-18 00:02:59 +00002173static void be_mcc_queues_destroy(struct be_adapter *adapter)
2174{
2175 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002176
Sathya Perla8788fdc2009-07-27 22:52:03 +00002177 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002178 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002179 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002180 be_queue_free(adapter, q);
2181
Sathya Perla8788fdc2009-07-27 22:52:03 +00002182 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002183 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002184 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002185 be_queue_free(adapter, q);
2186}
2187
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Create the MCC CQ first, then the MCC WRB queue that posts
	 * completions to it; unwind in reverse order on failure.
	 */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2220
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221static void be_tx_queues_destroy(struct be_adapter *adapter)
2222{
2223 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002224 struct be_tx_obj *txo;
2225 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226
Sathya Perla3c8def92011-06-12 20:01:58 +00002227 for_all_tx_queues(adapter, txo, i) {
2228 q = &txo->q;
2229 if (q->created)
2230 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2231 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232
Sathya Perla3c8def92011-06-12 20:01:58 +00002233 q = &txo->cq;
2234 if (q->created)
2235 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2236 be_queue_free(adapter, q);
2237 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238}
2239
/* Allocate and create the TX completion queues and TX WRB queues.
 *
 * Returns 0 on success or a negative status; partially created queues are
 * left for the caller to destroy via be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2280
2281static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002282{
2283 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002284 struct be_rx_obj *rxo;
2285 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286
Sathya Perla3abcded2010-10-03 22:12:27 -07002287 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002288 q = &rxo->cq;
2289 if (q->created)
2290 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2291 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002292 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293}
2294
/* Allocate and create one RX completion queue per RX ring, distributing
 * them round-robin across the event queues.
 *
 * Returns 0 on success or a negative status code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2331
/* INTx (legacy interrupt) handler: schedules NAPI on the EQ and tells the
 * kernel whether the interrupt was ours.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2363
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002364static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002365{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002366 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367
Sathya Perla0b545a62012-11-23 00:27:18 +00002368 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2369 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002370 return IRQ_HANDLED;
2371}
2372
Sathya Perla2e588f82011-03-11 02:49:26 +00002373static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002374{
Somnath Koture38b1702013-05-29 22:55:56 +00002375 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002376}
2377
/* Process up to @budget RX completions on @rxo, handing packets to the
 * stack (via GRO when eligible) and replenishing RX buffers when the
 * queue runs low.
 *
 * @polling: NAPI_POLLING or BUSY_POLLING; GRO is skipped while busy-polling.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2437
Kalesh AP512bb8a2014-09-02 09:56:49 +05302438static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2439{
2440 switch (status) {
2441 case BE_TX_COMP_HDR_PARSE_ERR:
2442 tx_stats(txo)->tx_hdr_parse_err++;
2443 break;
2444 case BE_TX_COMP_NDMA_ERR:
2445 tx_stats(txo)->tx_dma_err++;
2446 break;
2447 case BE_TX_COMP_ACL_ERR:
2448 tx_stats(txo)->tx_spoof_check_err++;
2449 break;
2450 }
2451}
2452
2453static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2454{
2455 switch (status) {
2456 case LANCER_TX_COMP_LSO_ERR:
2457 tx_stats(txo)->tx_tso_err++;
2458 break;
2459 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2460 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2461 tx_stats(txo)->tx_spoof_check_err++;
2462 break;
2463 case LANCER_TX_COMP_QINQ_ERR:
2464 tx_stats(txo)->tx_qinq_err++;
2465 break;
2466 case LANCER_TX_COMP_PARITY_ERR:
2467 tx_stats(txo)->tx_internal_parity_err++;
2468 break;
2469 case LANCER_TX_COMP_DMA_ERR:
2470 tx_stats(txo)->tx_dma_err++;
2471 break;
2472 }
2473}
2474
/* Reap TX completions on @txo (attached to netdev tx-queue @idx), free the
 * completed wrbs and skbs, update error stats, and wake the netdev queue if
 * it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* Non-zero status indicates a TX error; account it */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002513
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302514int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002515{
2516 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2517 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002518 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302519 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302520 struct be_tx_obj *txo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002521
Sathya Perla0b545a62012-11-23 00:27:18 +00002522 num_evts = events_get(eqo);
2523
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302524 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2525 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002526
Sathya Perla6384a4d2013-10-25 10:40:16 +05302527 if (be_lock_napi(eqo)) {
2528 /* This loop will iterate twice for EQ0 in which
2529 * completions of the last RXQ (default one) are also processed
2530 * For other EQs the loop iterates only once
2531 */
2532 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2533 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2534 max_work = max(work, max_work);
2535 }
2536 be_unlock_napi(eqo);
2537 } else {
2538 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002539 }
2540
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002541 if (is_mcc_eqo(eqo))
2542 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002543
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002544 if (max_work < budget) {
2545 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002546 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002547 } else {
2548 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002549 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002550 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002551 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002552}
2553
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) RX handler: tries a small burst (4) of RX
 * completions on each RXQ of this EQ; returns LL_FLUSH_BUSY if NAPI
 * currently owns the queues.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2575
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002576void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002577{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002578 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2579 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002580 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302581 bool error_detected = false;
2582 struct device *dev = &adapter->pdev->dev;
2583 struct net_device *netdev = adapter->netdev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002584
Sathya Perlad23e9462012-12-17 19:38:51 +00002585 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002586 return;
2587
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002588 if (lancer_chip(adapter)) {
2589 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2590 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2591 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302592 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002593 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302594 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302595 adapter->hw_error = true;
2596 /* Do not log error messages if its a FW reset */
2597 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2598 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2599 dev_info(dev, "Firmware update in progress\n");
2600 } else {
2601 error_detected = true;
2602 dev_err(dev, "Error detected in the card\n");
2603 dev_err(dev, "ERR: sliport status 0x%x\n",
2604 sliport_status);
2605 dev_err(dev, "ERR: sliport error1 0x%x\n",
2606 sliport_err1);
2607 dev_err(dev, "ERR: sliport error2 0x%x\n",
2608 sliport_err2);
2609 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002610 }
2611 } else {
2612 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302613 PCICFG_UE_STATUS_LOW, &ue_lo);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002614 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302615 PCICFG_UE_STATUS_HIGH, &ue_hi);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002616 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302617 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002618 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302619 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002620
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002621 ue_lo = (ue_lo & ~ue_lo_mask);
2622 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002623
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302624 /* On certain platforms BE hardware can indicate spurious UEs.
2625 * Allow HW to stop working completely in case of a real UE.
2626 * Hence not setting the hw_error for UE detection.
2627 */
2628
2629 if (ue_lo || ue_hi) {
2630 error_detected = true;
2631 dev_err(dev,
2632 "Unrecoverable Error detected in the adapter");
2633 dev_err(dev, "Please reboot server to recover");
2634 if (skyhawk_chip(adapter))
2635 adapter->hw_error = true;
2636 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2637 if (ue_lo & 1)
2638 dev_err(dev, "UE: %s bit set\n",
2639 ue_status_low_desc[i]);
2640 }
2641 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2642 if (ue_hi & 1)
2643 dev_err(dev, "UE: %s bit set\n",
2644 ue_status_hi_desc[i]);
2645 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05302646 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002647 }
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302648 if (error_detected)
2649 netif_carrier_off(netdev);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002650}
2651
Sathya Perla8d56ff12009-11-22 22:02:26 +00002652static void be_msix_disable(struct be_adapter *adapter)
2653{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002654 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002655 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002656 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302657 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002658 }
2659}
2660
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002661static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002662{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002663 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002664 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002665
Sathya Perla92bf14a2013-08-27 16:57:32 +05302666 /* If RoCE is supported, program the max number of NIC vectors that
2667 * may be configured via set-channels, along with vectors needed for
2668 * RoCe. Else, just program the number we'll use initially.
2669 */
2670 if (be_roce_supported(adapter))
2671 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2672 2 * num_online_cpus());
2673 else
2674 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002675
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002676 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002677 adapter->msix_entries[i].entry = i;
2678
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002679 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2680 MIN_MSIX_VECTORS, num_vec);
2681 if (num_vec < 0)
2682 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00002683
Sathya Perla92bf14a2013-08-27 16:57:32 +05302684 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2685 adapter->num_msix_roce_vec = num_vec / 2;
2686 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2687 adapter->num_msix_roce_vec);
2688 }
2689
2690 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2691
2692 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2693 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002694 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002695
2696fail:
2697 dev_warn(dev, "MSIx enable failed\n");
2698
2699 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2700 if (!be_physfn(adapter))
2701 return num_vec;
2702 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002703}
2704
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002705static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302706 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002707{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302708 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002709}
2710
2711static int be_msix_register(struct be_adapter *adapter)
2712{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002713 struct net_device *netdev = adapter->netdev;
2714 struct be_eq_obj *eqo;
2715 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002716
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002717 for_all_evt_queues(adapter, eqo, i) {
2718 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2719 vec = be_msix_vec_get(adapter, eqo);
2720 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002721 if (status)
2722 goto err_msix;
2723 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002724
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002725 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002726err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002727 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2728 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2729 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05302730 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002731 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002732 return status;
2733}
2734
2735static int be_irq_register(struct be_adapter *adapter)
2736{
2737 struct net_device *netdev = adapter->netdev;
2738 int status;
2739
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002740 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002741 status = be_msix_register(adapter);
2742 if (status == 0)
2743 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002744 /* INTx is not supported for VF */
2745 if (!be_physfn(adapter))
2746 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002747 }
2748
Sathya Perlae49cc342012-11-27 19:50:02 +00002749 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002750 netdev->irq = adapter->pdev->irq;
2751 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002752 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753 if (status) {
2754 dev_err(&adapter->pdev->dev,
2755 "INTx request IRQ failed - err %d\n", status);
2756 return status;
2757 }
2758done:
2759 adapter->isr_registered = true;
2760 return 0;
2761}
2762
2763static void be_irq_unregister(struct be_adapter *adapter)
2764{
2765 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002766 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002767 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002768
2769 if (!adapter->isr_registered)
2770 return;
2771
2772 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002773 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002774 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002775 goto done;
2776 }
2777
2778 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002779 for_all_evt_queues(adapter, eqo, i)
2780 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002781
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002782done:
2783 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002784}
2785
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002786static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002787{
2788 struct be_queue_info *q;
2789 struct be_rx_obj *rxo;
2790 int i;
2791
2792 for_all_rx_queues(adapter, rxo, i) {
2793 q = &rxo->q;
2794 if (q->created) {
2795 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002796 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002797 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002798 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002799 }
2800}
2801
Sathya Perla889cd4b2010-05-30 23:33:45 +00002802static int be_close(struct net_device *netdev)
2803{
2804 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002805 struct be_eq_obj *eqo;
2806 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002807
Kalesh APe1ad8e32014-04-14 16:12:41 +05302808 /* This protection is needed as be_close() may be called even when the
2809 * adapter is in cleared state (after eeh perm failure)
2810 */
2811 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2812 return 0;
2813
Parav Pandit045508a2012-03-26 14:27:13 +00002814 be_roce_dev_close(adapter);
2815
Ivan Veceradff345c52013-11-27 08:59:32 +01002816 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2817 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00002818 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302819 be_disable_busy_poll(eqo);
2820 }
David S. Miller71237b62013-11-28 18:53:36 -05002821 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00002822 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002823
2824 be_async_mcc_disable(adapter);
2825
2826 /* Wait for all pending tx completions to arrive so that
2827 * all tx skbs are freed.
2828 */
Sathya Perlafba87552013-05-08 02:05:50 +00002829 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302830 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002831
2832 be_rx_qs_destroy(adapter);
2833
Ajit Khaparded11a3472013-11-18 10:44:37 -06002834 for (i = 1; i < (adapter->uc_macs + 1); i++)
2835 be_cmd_pmac_del(adapter, adapter->if_handle,
2836 adapter->pmac_id[i], 0);
2837 adapter->uc_macs = 0;
2838
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002839 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002840 if (msix_enabled(adapter))
2841 synchronize_irq(be_msix_vec_get(adapter, eqo));
2842 else
2843 synchronize_irq(netdev->irq);
2844 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002845 }
2846
Sathya Perla889cd4b2010-05-30 23:33:45 +00002847 be_irq_unregister(adapter);
2848
Sathya Perla482c9e72011-06-29 23:33:17 +00002849 return 0;
2850}
2851
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002852static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002853{
2854 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002855 int rc, i, j;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302856 u8 rss_hkey[RSS_HASH_KEY_LEN];
2857 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00002858
2859 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002860 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2861 sizeof(struct be_eth_rx_d));
2862 if (rc)
2863 return rc;
2864 }
2865
2866 /* The FW would like the default RXQ to be created first */
2867 rxo = default_rxo(adapter);
2868 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2869 adapter->if_handle, false, &rxo->rss_id);
2870 if (rc)
2871 return rc;
2872
2873 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002874 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002875 rx_frag_size, adapter->if_handle,
2876 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002877 if (rc)
2878 return rc;
2879 }
2880
2881 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302882 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2883 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002884 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302885 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002886 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302887 rss->rsstable[j + i] = rxo->rss_id;
2888 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002889 }
2890 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05302891 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2892 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00002893
2894 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05302895 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2896 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302897 } else {
2898 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05302899 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302900 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002901
Venkata Duvvurue2557872014-04-21 15:38:00 +05302902 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05302903 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Venkata Duvvurue2557872014-04-21 15:38:00 +05302904 128, rss_hkey);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302905 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302906 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302907 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002908 }
2909
Venkata Duvvurue2557872014-04-21 15:38:00 +05302910 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2911
Sathya Perla482c9e72011-06-29 23:33:17 +00002912 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002913 for_all_rx_queues(adapter, rxo, i)
Ajit Khapardec30d7262014-09-12 17:39:16 +05302914 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002915 return 0;
2916}
2917
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002918static int be_open(struct net_device *netdev)
2919{
2920 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002921 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002922 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002923 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002924 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002925 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002926
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002927 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002928 if (status)
2929 goto err;
2930
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002931 status = be_irq_register(adapter);
2932 if (status)
2933 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002934
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002935 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002936 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002937
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002938 for_all_tx_queues(adapter, txo, i)
2939 be_cq_notify(adapter, txo->cq.id, true, 0);
2940
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002941 be_async_mcc_enable(adapter);
2942
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002943 for_all_evt_queues(adapter, eqo, i) {
2944 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302945 be_enable_busy_poll(eqo);
Suresh Reddy4cad9f32014-07-11 14:03:01 +05302946 be_eq_notify(adapter, eqo->q.id, true, true, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002947 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002948 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002949
Sathya Perla323ff712012-09-28 04:39:43 +00002950 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002951 if (!status)
2952 be_link_status_update(adapter, link_status);
2953
Sathya Perlafba87552013-05-08 02:05:50 +00002954 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002955 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05302956
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302957#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05302958 if (skyhawk_chip(adapter))
2959 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302960#endif
2961
Sathya Perla889cd4b2010-05-30 23:33:45 +00002962 return 0;
2963err:
2964 be_close(adapter->netdev);
2965 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002966}
2967
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002968static int be_setup_wol(struct be_adapter *adapter, bool enable)
2969{
2970 struct be_dma_mem cmd;
2971 int status = 0;
2972 u8 mac[ETH_ALEN];
2973
2974 memset(mac, 0, ETH_ALEN);
2975
2976 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002977 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2978 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302979 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302980 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002981
2982 if (enable) {
2983 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302984 PCICFG_PM_CONTROL_OFFSET,
2985 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002986 if (status) {
2987 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002988 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002989 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2990 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002991 return status;
2992 }
2993 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302994 adapter->netdev->dev_addr,
2995 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002996 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2997 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2998 } else {
2999 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3000 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3001 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3002 }
3003
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003004 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003005 return status;
3006}
3007
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003008/*
3009 * Generate a seed MAC address from the PF MAC Address using jhash.
3010 * MAC Address for VFs are assigned incrementally starting from the seed.
3011 * These addresses are programmed in the ASIC by the PF and the VF driver
3012 * queries for the MAC address during its probe.
3013 */
Sathya Perla4c876612013-02-03 20:30:11 +00003014static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003015{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003016 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003017 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003018 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003019 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003020
3021 be_vf_eth_addr_generate(adapter, mac);
3022
Sathya Perla11ac75e2011-12-13 00:58:50 +00003023 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303024 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003025 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003026 vf_cfg->if_handle,
3027 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303028 else
3029 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3030 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003031
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003032 if (status)
3033 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303034 "Mac address assignment failed for VF %d\n",
3035 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003036 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003037 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003038
3039 mac[5] += 1;
3040 }
3041 return status;
3042}
3043
Sathya Perla4c876612013-02-03 20:30:11 +00003044static int be_vfs_mac_query(struct be_adapter *adapter)
3045{
3046 int status, vf;
3047 u8 mac[ETH_ALEN];
3048 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003049
3050 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303051 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3052 mac, vf_cfg->if_handle,
3053 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003054 if (status)
3055 return status;
3056 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3057 }
3058 return 0;
3059}
3060
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003061static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003062{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003063 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003064 u32 vf;
3065
Sathya Perla257a3fe2013-06-14 15:54:51 +05303066 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003067 dev_warn(&adapter->pdev->dev,
3068 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003069 goto done;
3070 }
3071
Sathya Perlab4c1df92013-05-08 02:05:47 +00003072 pci_disable_sriov(adapter->pdev);
3073
Sathya Perla11ac75e2011-12-13 00:58:50 +00003074 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303075 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003076 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3077 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303078 else
3079 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3080 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003081
Sathya Perla11ac75e2011-12-13 00:58:50 +00003082 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3083 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003084done:
3085 kfree(adapter->vf_cfg);
3086 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303087 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003088}
3089
/* Destroy every queue type in reverse order of creation. */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3097
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303098static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003099{
Sathya Perla191eb752012-02-23 18:50:13 +00003100 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3101 cancel_delayed_work_sync(&adapter->work);
3102 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3103 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303104}
3105
Somnath Koturb05004a2013-12-05 12:08:16 +05303106static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303107{
3108 int i;
3109
Somnath Koturb05004a2013-12-05 12:08:16 +05303110 if (adapter->pmac_id) {
3111 for (i = 0; i < (adapter->uc_macs + 1); i++)
3112 be_cmd_pmac_del(adapter, adapter->if_handle,
3113 adapter->pmac_id[i], 0);
3114 adapter->uc_macs = 0;
3115
3116 kfree(adapter->pmac_id);
3117 adapter->pmac_id = NULL;
3118 }
3119}
3120
#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN tunnel offload state in FW and clear the cached port. */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303135
Somnath Koturb05004a2013-12-05 12:08:16 +05303136static int be_clear(struct be_adapter *adapter)
3137{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303138 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003139
Sathya Perla11ac75e2011-12-13 00:58:50 +00003140 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003141 be_vf_clear(adapter);
3142
Vasundhara Volambec84e62014-06-30 13:01:32 +05303143 /* Re-configure FW to distribute resources evenly across max-supported
3144 * number of VFs, only when VFs are not already enabled.
3145 */
3146 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3147 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3148 pci_sriov_get_totalvfs(adapter->pdev));
3149
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303150#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303151 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303152#endif
Sathya Perla2d17f402013-07-23 15:25:04 +05303153 /* delete the primary mac along with the uc-mac list */
Somnath Koturb05004a2013-12-05 12:08:16 +05303154 be_mac_clear(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003155
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003156 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003157
Sathya Perla77071332013-08-27 16:57:34 +05303158 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003159
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003160 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303161 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003162 return 0;
3163}
3164
Sathya Perla4c876612013-02-03 20:30:11 +00003165static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003166{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303167 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003168 struct be_vf_cfg *vf_cfg;
3169 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003170 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003171
Sathya Perla4c876612013-02-03 20:30:11 +00003172 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3173 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003174
Sathya Perla4c876612013-02-03 20:30:11 +00003175 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303176 if (!BE3_chip(adapter)) {
3177 status = be_cmd_get_profile_config(adapter, &res,
3178 vf + 1);
3179 if (!status)
3180 cap_flags = res.if_cap_flags;
3181 }
Sathya Perla4c876612013-02-03 20:30:11 +00003182
3183 /* If a FW profile exists, then cap_flags are updated */
3184 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
Sathya Perla748b5392014-05-09 13:29:13 +05303185 BE_IF_FLAGS_BROADCAST |
3186 BE_IF_FLAGS_MULTICAST);
3187 status =
3188 be_cmd_if_create(adapter, cap_flags, en_flags,
3189 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003190 if (status)
3191 goto err;
3192 }
3193err:
3194 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003195}
3196
Sathya Perla39f1d942012-05-08 19:41:24 +00003197static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003198{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003199 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003200 int vf;
3201
Sathya Perla39f1d942012-05-08 19:41:24 +00003202 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3203 GFP_KERNEL);
3204 if (!adapter->vf_cfg)
3205 return -ENOMEM;
3206
Sathya Perla11ac75e2011-12-13 00:58:50 +00003207 for_all_vfs(adapter, vf_cfg, vf) {
3208 vf_cfg->if_handle = -1;
3209 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003210 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003211 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003212}
3213
/* Bring up SR-IOV VFs: allocate per-VF driver state, create (or, if the
 * VFs survived a previous PF load, re-discover) each VF's FW interface
 * object and MAC, grant filter-programming privileges and finally enable
 * the VFs in PCI when they are not already enabled.
 * Returns 0 on success; on any failure all VF state is torn down via
 * be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	/* Non-zero when VFs were left enabled by a previous PF driver
	 * load; the FW objects for them then already exist.
	 */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Re-discover existing if-handles and MACs from FW */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh start: create FW interfaces and program MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3288
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303289/* Converting function_mode bits on BE3 to SH mc_type enums */
3290
3291static u8 be_convert_mc_type(u32 function_mode)
3292{
Suresh Reddy66064db2014-06-23 16:41:29 +05303293 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303294 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303295 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303296 return FLEX10;
3297 else if (function_mode & VNIC_MODE)
3298 return vNIC2;
3299 else if (function_mode & UMC_ENABLED)
3300 return UMC;
3301 else
3302 return MC_NONE;
3303}
3304
/* On BE2/BE3 the FW does not report supported resource limits; derive
 * them here from chip generation, port count, SR-IOV state and the
 * multi-channel mode, and fill *res accordingly.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues are available only on an RSS-capable, non-SRIOV PF */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				  BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS set (the default queue) */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				  BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3372
Sathya Perla30128032011-11-10 19:17:57 +00003373static void be_setup_init(struct be_adapter *adapter)
3374{
3375 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003376 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003377 adapter->if_handle = -1;
3378 adapter->be3_native = false;
3379 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003380 if (be_physfn(adapter))
3381 adapter->cmd_privileges = MAX_PRIVILEGES;
3382 else
3383 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003384}
3385
Vasundhara Volambec84e62014-06-30 13:01:32 +05303386static int be_get_sriov_config(struct be_adapter *adapter)
3387{
3388 struct device *dev = &adapter->pdev->dev;
3389 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303390 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303391
3392 /* Some old versions of BE3 FW don't report max_vfs value */
Sathya Perlad3d18312014-08-01 17:47:30 +05303393 be_cmd_get_profile_config(adapter, &res, 0);
3394
Vasundhara Volambec84e62014-06-30 13:01:32 +05303395 if (BE3_chip(adapter) && !res.max_vfs) {
3396 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3397 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3398 }
3399
Sathya Perlad3d18312014-08-01 17:47:30 +05303400 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303401
3402 if (!be_max_vfs(adapter)) {
3403 if (num_vfs)
Vasundhara Volam50762662014-09-12 17:39:14 +05303404 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
Vasundhara Volambec84e62014-06-30 13:01:32 +05303405 adapter->num_vfs = 0;
3406 return 0;
3407 }
3408
Sathya Perlad3d18312014-08-01 17:47:30 +05303409 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3410
Vasundhara Volambec84e62014-06-30 13:01:32 +05303411 /* validate num_vfs module param */
3412 old_vfs = pci_num_vf(adapter->pdev);
3413 if (old_vfs) {
3414 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3415 if (old_vfs != num_vfs)
3416 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3417 adapter->num_vfs = old_vfs;
3418 } else {
3419 if (num_vfs > be_max_vfs(adapter)) {
3420 dev_info(dev, "Resources unavailable to init %d VFs\n",
3421 num_vfs);
3422 dev_info(dev, "Limiting to %d VFs\n",
3423 be_max_vfs(adapter));
3424 }
3425 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3426 }
3427
3428 return 0;
3429}
3430
Sathya Perla92bf14a2013-08-27 16:57:32 +05303431static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003432{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303433 struct device *dev = &adapter->pdev->dev;
3434 struct be_resources res = {0};
3435 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003436
Sathya Perla92bf14a2013-08-27 16:57:32 +05303437 if (BEx_chip(adapter)) {
3438 BEx_get_resources(adapter, &res);
3439 adapter->res = res;
3440 }
3441
Sathya Perla92bf14a2013-08-27 16:57:32 +05303442 /* For Lancer, SH etc read per-function resource limits from FW.
3443 * GET_FUNC_CONFIG returns per function guaranteed limits.
3444 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3445 */
Sathya Perla4c876612013-02-03 20:30:11 +00003446 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303447 status = be_cmd_get_func_config(adapter, &res);
3448 if (status)
3449 return status;
3450
3451 /* If RoCE may be enabled stash away half the EQs for RoCE */
3452 if (be_roce_supported(adapter))
3453 res.max_evt_qs /= 2;
3454 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003455 }
3456
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303457 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3458 be_max_txqs(adapter), be_max_rxqs(adapter),
3459 be_max_rss(adapter), be_max_eqs(adapter),
3460 be_max_vfs(adapter));
3461 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3462 be_max_uc(adapter), be_max_mc(adapter),
3463 be_max_vlans(adapter));
3464
Sathya Perla92bf14a2013-08-27 16:57:32 +05303465 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003466}
3467
Sathya Perlad3d18312014-08-01 17:47:30 +05303468static void be_sriov_config(struct be_adapter *adapter)
3469{
3470 struct device *dev = &adapter->pdev->dev;
3471 int status;
3472
3473 status = be_get_sriov_config(adapter);
3474 if (status) {
3475 dev_err(dev, "Failed to query SR-IOV configuration\n");
3476 dev_err(dev, "SR-IOV cannot be enabled\n");
3477 return;
3478 }
3479
3480 /* When the HW is in SRIOV capable configuration, the PF-pool
3481 * resources are equally distributed across the max-number of
3482 * VFs. The user may request only a subset of the max-vfs to be
3483 * enabled. Based on num_vfs, redistribute the resources across
3484 * num_vfs so that each VF will have access to more number of
3485 * resources. This facility is not available in BE3 FW.
3486 * Also, this is done by FW in Lancer chip.
3487 */
3488 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3489 status = be_cmd_set_sriov_config(adapter,
3490 adapter->pool_res,
3491 adapter->num_vfs);
3492 if (status)
3493 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3494 }
3495}
3496
/* Query FW for the adapter's configuration and resource limits and size
 * driver state (pmac_id table, cfg_num_qs) accordingly. Must run before
 * interface/queue creation in be_setup().
 * Returns 0, a FW-command error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* Report which FW resource profile this PF is running with */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* Configure the SR-IOV resource pool; skipped on BE2 and on VFs */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3530
Sathya Perla95046b92013-07-23 15:25:02 +05303531static int be_mac_setup(struct be_adapter *adapter)
3532{
3533 u8 mac[ETH_ALEN];
3534 int status;
3535
3536 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3537 status = be_cmd_get_perm_mac(adapter, mac);
3538 if (status)
3539 return status;
3540
3541 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3542 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3543 } else {
3544 /* Maybe the HW was reset; dev_addr must be re-programmed */
3545 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3546 }
3547
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003548 /* For BE3-R VFs, the PF programs the initial MAC address */
3549 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3550 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3551 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303552 return 0;
3553}
3554
/* Arm the adapter's periodic delayed worker (1s period) and flag it as
 * scheduled so be_cancel_worker() knows there is pending work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3560
Sathya Perla77071332013-08-27 16:57:34 +05303561static int be_setup_queues(struct be_adapter *adapter)
3562{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303563 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303564 int status;
3565
3566 status = be_evt_queues_create(adapter);
3567 if (status)
3568 goto err;
3569
3570 status = be_tx_qs_create(adapter);
3571 if (status)
3572 goto err;
3573
3574 status = be_rx_cqs_create(adapter);
3575 if (status)
3576 goto err;
3577
3578 status = be_mcc_queues_create(adapter);
3579 if (status)
3580 goto err;
3581
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303582 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3583 if (status)
3584 goto err;
3585
3586 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3587 if (status)
3588 goto err;
3589
Sathya Perla77071332013-08-27 16:57:34 +05303590 return 0;
3591err:
3592 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3593 return status;
3594}
3595
/* Tear down and re-create all queues so that a changed queue
 * configuration takes effect; the netdev is closed/re-opened around the
 * operation if it was running, and the periodic worker is re-armed.
 * NOTE(review): be_setup_queues() updates real_num_tx/rx_queues, which
 * requires rtnl_lock (see be_setup()) — presumably callers hold it;
 * confirm at call sites.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3631
/* Full adapter initialization (also run after a reset): query FW
 * config, enable MSI-x, create the FW interface object and all queues,
 * program the MAC/VLANs/RX-mode/flow-control, optionally bring up
 * SR-IOV VFs, then arm the periodic worker and mark setup done.
 * On any failure the adapter is fully torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Create the FW interface with as many of the wanted capability
	 * flags as the function actually supports.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn about known-problematic old BE2 firmware */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program any VLANs that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Re-program flow control only if it differs from the FW state */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* VF setup failures are not fatal to PF setup (not checked) */
	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3714
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll handler (netconsole etc.): ring the doorbell and kick NAPI on
 * every event queue so completions are processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int qidx;

	for_all_evt_queues(adapter, eqo, qidx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3728
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303729static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003730
Sathya Perla306f1342011-08-02 19:57:45 +00003731static bool phy_flashing_required(struct be_adapter *adapter)
3732{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003733 return (adapter->phy.phy_type == TN_8022 &&
3734 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003735}
3736
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003737static bool is_comp_in_ufi(struct be_adapter *adapter,
3738 struct flash_section_info *fsec, int type)
3739{
3740 int i = 0, img_type = 0;
3741 struct flash_section_info_g2 *fsec_g2 = NULL;
3742
Sathya Perlaca34fe32012-11-06 17:48:56 +00003743 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003744 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3745
3746 for (i = 0; i < MAX_FLASH_COMP; i++) {
3747 if (fsec_g2)
3748 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3749 else
3750 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3751
3752 if (img_type == type)
3753 return true;
3754 }
3755 return false;
3756
3757}
3758
Jingoo Han4188e7d2013-08-05 18:02:02 +09003759static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303760 int header_size,
3761 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003762{
3763 struct flash_section_info *fsec = NULL;
3764 const u8 *p = fw->data;
3765
3766 p += header_size;
3767 while (p < (fw->data + fw->size)) {
3768 fsec = (struct flash_section_info *)p;
3769 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3770 return fsec;
3771 p += 32;
3772 }
3773 return NULL;
3774}
3775
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303776static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3777 u32 img_offset, u32 img_size, int hdr_size,
3778 u16 img_optype, bool *crc_match)
3779{
3780 u32 crc_offset;
3781 int status;
3782 u8 crc[4];
3783
3784 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3785 if (status)
3786 return status;
3787
3788 crc_offset = hdr_size + img_offset + img_size - 4;
3789
3790 /* Skip flashing, if crc of flashed region matches */
3791 if (!memcmp(crc, p + crc_offset, 4))
3792 *crc_match = true;
3793 else
3794 *crc_match = false;
3795
3796 return status;
3797}
3798
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003799static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303800 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003801{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003802 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303803 u32 total_bytes, flash_op, num_bytes;
3804 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003805
3806 total_bytes = img_size;
3807 while (total_bytes) {
3808 num_bytes = min_t(u32, 32*1024, total_bytes);
3809
3810 total_bytes -= num_bytes;
3811
3812 if (!total_bytes) {
3813 if (optype == OPTYPE_PHY_FW)
3814 flash_op = FLASHROM_OPER_PHY_FLASH;
3815 else
3816 flash_op = FLASHROM_OPER_FLASH;
3817 } else {
3818 if (optype == OPTYPE_PHY_FW)
3819 flash_op = FLASHROM_OPER_PHY_SAVE;
3820 else
3821 flash_op = FLASHROM_OPER_SAVE;
3822 }
3823
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003824 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003825 img += num_bytes;
3826 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303827 flash_op, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05303828 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303829 optype == OPTYPE_PHY_FW)
3830 break;
3831 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003832 return status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003833 }
3834 return 0;
3835}
3836
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003837/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003838static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303839 const struct firmware *fw,
3840 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003841{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003842 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303843 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003844 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303845 int status, i, filehdr_size, num_comp;
3846 const struct flash_comp *pflashcomp;
3847 bool crc_match;
3848 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003849
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003850 struct flash_comp gen3_flash_types[] = {
3851 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3852 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3853 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3854 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3855 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3856 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3857 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3858 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3859 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3860 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3861 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3862 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3863 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3864 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3865 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3866 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3867 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3868 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3869 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3870 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003871 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003872
3873 struct flash_comp gen2_flash_types[] = {
3874 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3875 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3876 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3877 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3878 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3879 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3880 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3881 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3882 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3883 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3884 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3885 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3886 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3887 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3888 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3889 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003890 };
3891
Sathya Perlaca34fe32012-11-06 17:48:56 +00003892 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003893 pflashcomp = gen3_flash_types;
3894 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003895 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003896 } else {
3897 pflashcomp = gen2_flash_types;
3898 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003899 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003900 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003901
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003902 /* Get flash section info*/
3903 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3904 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303905 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003906 return -1;
3907 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003908 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003909 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003910 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003911
3912 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3913 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3914 continue;
3915
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003916 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3917 !phy_flashing_required(adapter))
3918 continue;
3919
3920 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303921 status = be_check_flash_crc(adapter, fw->data,
3922 pflashcomp[i].offset,
3923 pflashcomp[i].size,
3924 filehdr_size +
3925 img_hdrs_size,
3926 OPTYPE_REDBOOT, &crc_match);
3927 if (status) {
3928 dev_err(dev,
3929 "Could not get CRC for 0x%x region\n",
3930 pflashcomp[i].optype);
3931 continue;
3932 }
3933
3934 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003935 continue;
3936 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003937
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303938 p = fw->data + filehdr_size + pflashcomp[i].offset +
3939 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003940 if (p + pflashcomp[i].size > fw->data + fw->size)
3941 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003942
3943 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303944 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003945 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303946 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003947 pflashcomp[i].img_type);
3948 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003949 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003950 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003951 return 0;
3952}
3953
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303954static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3955{
3956 u32 img_type = le32_to_cpu(fsec_entry.type);
3957 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3958
3959 if (img_optype != 0xFFFF)
3960 return img_optype;
3961
3962 switch (img_type) {
3963 case IMAGE_FIRMWARE_iSCSI:
3964 img_optype = OPTYPE_ISCSI_ACTIVE;
3965 break;
3966 case IMAGE_BOOT_CODE:
3967 img_optype = OPTYPE_REDBOOT;
3968 break;
3969 case IMAGE_OPTION_ROM_ISCSI:
3970 img_optype = OPTYPE_BIOS;
3971 break;
3972 case IMAGE_OPTION_ROM_PXE:
3973 img_optype = OPTYPE_PXE_BIOS;
3974 break;
3975 case IMAGE_OPTION_ROM_FCoE:
3976 img_optype = OPTYPE_FCOE_BIOS;
3977 break;
3978 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3979 img_optype = OPTYPE_ISCSI_BACKUP;
3980 break;
3981 case IMAGE_NCSI:
3982 img_optype = OPTYPE_NCSI_FW;
3983 break;
3984 case IMAGE_FLASHISM_JUMPVECTOR:
3985 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3986 break;
3987 case IMAGE_FIRMWARE_PHY:
3988 img_optype = OPTYPE_SH_PHY_FW;
3989 break;
3990 case IMAGE_REDBOOT_DIR:
3991 img_optype = OPTYPE_REDBOOT_DIR;
3992 break;
3993 case IMAGE_REDBOOT_CONFIG:
3994 img_optype = OPTYPE_REDBOOT_CONFIG;
3995 break;
3996 case IMAGE_UFI_DIR:
3997 img_optype = OPTYPE_UFI_DIR;
3998 break;
3999 default:
4000 break;
4001 }
4002
4003 return img_optype;
4004}
4005
/* Flash a Skyhawk-class UFI image section by section.
 *
 * Walks the flash section table embedded in @fw, verifies the on-card CRC
 * for each section (skipping sections that already match), and writes the
 * remaining sections via be_flash().
 *
 * @adapter:       adapter being flashed
 * @fw:            firmware image obtained from request_firmware()
 * @flash_cmd:     pre-allocated DMA buffer used for the flashrom commands
 * @num_of_images: number of image_hdr entries preceding the section data
 *
 * Returns 0 on success, -EINVAL for a corrupted image, -EAGAIN when the
 * card's current FW must be rebooted before flashing can complete, and
 * -EFAULT on CRC-query/flash failures.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	/* Section table sits after the file header and the image headers */
	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* 0xFFFF entry optype marks an old-style UFI section */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* Unknown/unsupported section - skip it */
		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Section already matches what is on the card - skip it */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds-check the section against the image buffer.
		 * NOTE(review): returns bare -1 rather than an errno here.
		 */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4087
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the card in LANCER_FW_DOWNLOAD_CHUNK-sized
 * pieces through a single DMA-coherent command buffer, then committed
 * with a zero-length write. Depending on the card's reported
 * change_status, the adapter is either reset in-place to activate the
 * new firmware or the user is told to reboot.
 *
 * Returns 0 on success (even if activation requires a reboot), -EINVAL
 * for a misaligned image, -ENOMEM on allocation failure, or a
 * be_cmd_status()-translated error on a failed write.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* Card-side writes are word-based; reject unaligned images */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One buffer holds the write_object request header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by what the card actually accepted */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* Try to activate the new FW without a server reboot */
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4172
Sathya Perlaca34fe32012-11-06 17:48:56 +00004173#define UFI_TYPE2 2
4174#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004175#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004176#define UFI_TYPE4 4
4177static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004178 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004179{
Kalesh APddf11692014-07-17 16:20:28 +05304180 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004181 goto be_get_ufi_exit;
4182
Sathya Perlaca34fe32012-11-06 17:48:56 +00004183 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4184 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004185 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4186 if (fhdr->asic_type_rev == 0x10)
4187 return UFI_TYPE3R;
4188 else
4189 return UFI_TYPE3;
4190 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004191 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004192
4193be_get_ufi_exit:
4194 dev_err(&adapter->pdev->dev,
4195 "UFI and Interface are not compatible for flashing\n");
4196 return -1;
4197}
4198
/* Download a UFI firmware image to a BE2/BE3/Skyhawk adapter.
 *
 * Determines the UFI flavor from the file header, then dispatches each
 * image (imageid == 1) to the matching per-chip flash routine. A shared
 * DMA buffer sized for one flashrom command is used throughout.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * incompatible UFI, or the error from the underlying flash routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* May return -1; UFI_TYPE2 and -1 are handled after the loop */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* Only imageid 1 carries flashable content */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen-2 (BE2) images have no per-image headers to iterate */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4267
4268int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4269{
4270 const struct firmware *fw;
4271 int status;
4272
4273 if (!netif_running(adapter->netdev)) {
4274 dev_err(&adapter->pdev->dev,
4275 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304276 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004277 }
4278
4279 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4280 if (status)
4281 goto fw_exit;
4282
4283 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4284
4285 if (lancer_chip(adapter))
4286 status = lancer_fw_download(adapter, fw);
4287 else
4288 status = be_fw_download(adapter, fw);
4289
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004290 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304291 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004292
Ajit Khaparde84517482009-09-04 03:12:16 +00004293fw_exit:
4294 release_firmware(fw);
4295 return status;
4296}
4297
Sathya Perla748b5392014-05-09 13:29:13 +05304298static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004299{
4300 struct be_adapter *adapter = netdev_priv(dev);
4301 struct nlattr *attr, *br_spec;
4302 int rem;
4303 int status = 0;
4304 u16 mode = 0;
4305
4306 if (!sriov_enabled(adapter))
4307 return -EOPNOTSUPP;
4308
4309 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4310
4311 nla_for_each_nested(attr, br_spec, rem) {
4312 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4313 continue;
4314
4315 mode = nla_get_u16(attr);
4316 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4317 return -EINVAL;
4318
4319 status = be_cmd_set_hsw_config(adapter, 0, 0,
4320 adapter->if_handle,
4321 mode == BRIDGE_MODE_VEPA ?
4322 PORT_FWD_TYPE_VEPA :
4323 PORT_FWD_TYPE_VEB);
4324 if (status)
4325 goto err;
4326
4327 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4328 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4329
4330 return status;
4331 }
4332err:
4333 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4334 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4335
4336 return status;
4337}
4338
4339static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304340 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004341{
4342 struct be_adapter *adapter = netdev_priv(dev);
4343 int status = 0;
4344 u8 hsw_mode;
4345
4346 if (!sriov_enabled(adapter))
4347 return 0;
4348
4349 /* BE and Lancer chips support VEB mode only */
4350 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4351 hsw_mode = PORT_FWD_TYPE_VEB;
4352 } else {
4353 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4354 adapter->if_handle, &hsw_mode);
4355 if (status)
4356 return 0;
4357 }
4358
4359 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4360 hsw_mode == PORT_FWD_TYPE_VEPA ?
4361 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4362}
4363
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304364#ifdef CONFIG_BE2NET_VXLAN
/* vxlan_port add notification: enable VxLAN offloads for @port.
 *
 * Converts the interface to tunnel mode and programs the UDP port in FW.
 * Only one offloaded VxLAN port is supported at a time; a second add
 * request is logged and ignored. On any FW failure the offloads are
 * rolled back via be_disable_vxlan_offloads().
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* VxLAN offloads are not available on Lancer/BEx class chips */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	/* Record the offloaded port so be_del_vxlan_port can match it */
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4404
4405static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4406 __be16 port)
4407{
4408 struct be_adapter *adapter = netdev_priv(netdev);
4409
4410 if (lancer_chip(adapter) || BEx_chip(adapter))
4411 return;
4412
4413 if (adapter->vxlan_port != port)
4414 return;
4415
4416 be_disable_vxlan_offloads(adapter);
4417
4418 dev_info(&adapter->pdev->dev,
4419 "Disabled VxLAN offloads for UDP port %d\n",
4420 be16_to_cpu(port));
4421}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304422#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304423
/* net_device_ops for the be2net driver. Optional handlers (netpoll,
 * busy-poll, VxLAN port notifications) are compiled in only when the
 * corresponding kernel config options are enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4453
/* Initialize netdev feature flags, ops, and ethtool hooks for @netdev.
 * Called once at probe time, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk supports checksum/TSO offloads on encapsulated (VxLAN)
	 * traffic as well
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enabled features = user-toggleable features + fixed VLAN RX ones */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4486
4487static void be_unmap_pci_bars(struct be_adapter *adapter)
4488{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004489 if (adapter->csr)
4490 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004491 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004492 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004493}
4494
/* Return the PCI BAR number holding the doorbell region: BAR 0 for
 * Lancer chips and virtual functions, BAR 4 for other physical funcs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4502
4503static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004504{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004505 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004506 adapter->roce_db.size = 4096;
4507 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4508 db_bar(adapter));
4509 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4510 db_bar(adapter));
4511 }
Parav Pandit045508a2012-03-26 14:27:13 +00004512 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004513}
4514
/* Map the PCI BARs used by the driver: the CSR BAR (BEx PFs only), the
 * doorbell BAR, and the RoCE doorbell window (Skyhawk).
 *
 * Returns 0 on success or -ENOMEM on an iomap failure; on failure any
 * partially-mapped BARs are unmapped before returning.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* CSR BAR (BAR 2) exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4538
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004539static void be_ctrl_cleanup(struct be_adapter *adapter)
4540{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004541 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004542
4543 be_unmap_pci_bars(adapter);
4544
4545 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004546 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4547 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004548
Sathya Perla5b8821b2011-08-02 19:57:44 +00004549 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004550 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004551 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4552 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004553}
4554
/* One-time control-path initialization at probe:
 * reads the SLI interface register, maps PCI BARs, allocates the
 * 16-byte-aligned mailbox and rx_filter DMA buffers, and initializes
 * the MCC locks and completion.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via the goto ladder.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode chip family and PF/VF role from the SLI interface reg */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be 16-byte aligned below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space so it can be restored after an EEH/FLR reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4613
4614static void be_stats_cleanup(struct be_adapter *adapter)
4615{
Sathya Perla3abcded2010-10-03 22:12:27 -07004616 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004617
4618 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004619 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4620 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004621}
4622
4623static int be_stats_init(struct be_adapter *adapter)
4624{
Sathya Perla3abcded2010-10-03 22:12:27 -07004625 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004626
Sathya Perlaca34fe32012-11-06 17:48:56 +00004627 if (lancer_chip(adapter))
4628 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4629 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004630 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004631 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004632 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004633 else
4634 /* ALL non-BE ASICs */
4635 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004636
Joe Perchesede23fa82013-08-26 22:45:23 -07004637 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4638 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304639 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304640 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004641 return 0;
4642}
4643
Bill Pemberton3bc6b062012-12-03 09:23:09 -05004644static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004645{
4646 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004647
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004648 if (!adapter)
4649 return;
4650
Parav Pandit045508a2012-03-26 14:27:13 +00004651 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004652 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00004653
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004654 cancel_delayed_work_sync(&adapter->func_recovery_work);
4655
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004656 unregister_netdev(adapter->netdev);
4657
Sathya Perla5fb379e2009-06-18 00:02:59 +00004658 be_clear(adapter);
4659
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004660 /* tell fw we're done with firing cmds */
4661 be_cmd_fw_clean(adapter);
4662
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004663 be_stats_cleanup(adapter);
4664
4665 be_ctrl_cleanup(adapter);
4666
Sathya Perlad6b6d982012-09-05 01:56:48 +00004667 pci_disable_pcie_error_reporting(pdev);
4668
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004669 pci_release_regions(pdev);
4670 pci_disable_device(pdev);
4671
4672 free_netdev(adapter->netdev);
4673}
4674
Sathya Perla39f1d942012-05-08 19:41:24 +00004675static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004676{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304677 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004678
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004679 status = be_cmd_get_cntl_attributes(adapter);
4680 if (status)
4681 return status;
4682
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004683 /* Must be a power of 2 or else MODULO will BUG_ON */
4684 adapter->be_get_temp_freq = 64;
4685
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304686 if (BEx_chip(adapter)) {
4687 level = be_cmd_get_fw_log_level(adapter);
4688 adapter->msg_enable =
4689 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4690 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004691
Sathya Perla92bf14a2013-08-27 16:57:32 +05304692 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004693 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004694}
4695
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004696static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004697{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004698 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004699 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004700
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004701 status = lancer_test_and_set_rdy_state(adapter);
4702 if (status)
4703 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004704
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004705 if (netif_running(adapter->netdev))
4706 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004707
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004708 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004709
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004710 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004711
4712 status = be_setup(adapter);
4713 if (status)
4714 goto err;
4715
4716 if (netif_running(adapter->netdev)) {
4717 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004718 if (status)
4719 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004720 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004721
Somnath Kotur4bebb562013-12-05 12:07:55 +05304722 dev_err(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004723 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004724err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004725 if (status == -EAGAIN)
4726 dev_err(dev, "Waiting for resource provisioning\n");
4727 else
Somnath Kotur4bebb562013-12-05 12:07:55 +05304728 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004729
4730 return status;
4731}
4732
4733static void be_func_recovery_task(struct work_struct *work)
4734{
4735 struct be_adapter *adapter =
4736 container_of(work, struct be_adapter, func_recovery_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004737 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004738
4739 be_detect_error(adapter);
4740
4741 if (adapter->hw_error && lancer_chip(adapter)) {
4742
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004743 rtnl_lock();
4744 netif_device_detach(adapter->netdev);
4745 rtnl_unlock();
4746
4747 status = lancer_recover_func(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004748 if (!status)
4749 netif_device_attach(adapter->netdev);
4750 }
4751
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004752 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4753 * no need to attempt further recovery.
4754 */
4755 if (!status || status == -EAGAIN)
4756 schedule_delayed_work(&adapter->func_recovery_work,
4757 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004758}
4759
4760static void be_worker(struct work_struct *work)
4761{
4762 struct be_adapter *adapter =
4763 container_of(work, struct be_adapter, work.work);
4764 struct be_rx_obj *rxo;
4765 int i;
4766
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004767 /* when interrupts are not yet enabled, just reap any pending
4768 * mcc completions */
4769 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004770 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004771 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004772 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004773 goto reschedule;
4774 }
4775
4776 if (!adapter->stats_cmd_sent) {
4777 if (lancer_chip(adapter))
4778 lancer_cmd_get_pport_stats(adapter,
4779 &adapter->stats_cmd);
4780 else
4781 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4782 }
4783
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05304784 if (be_physfn(adapter) &&
4785 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004786 be_cmd_get_die_temperature(adapter);
4787
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004788 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05304789 /* Replenish RX-queues starved due to memory
4790 * allocation failures.
4791 */
4792 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05304793 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004794 }
4795
Sathya Perla2632baf2013-10-01 16:00:00 +05304796 be_eqd_update(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004797
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004798reschedule:
4799 adapter->work_counter++;
4800 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4801}
4802
Sathya Perla257a3fe2013-06-14 15:54:51 +05304803/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004804static bool be_reset_required(struct be_adapter *adapter)
4805{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304806 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004807}
4808
Sathya Perlad3791422012-09-28 04:39:44 +00004809static char *mc_name(struct be_adapter *adapter)
4810{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304811 char *str = ""; /* default */
4812
4813 switch (adapter->mc_type) {
4814 case UMC:
4815 str = "UMC";
4816 break;
4817 case FLEX10:
4818 str = "FLEX10";
4819 break;
4820 case vNIC1:
4821 str = "vNIC-1";
4822 break;
4823 case nPAR:
4824 str = "nPAR";
4825 break;
4826 case UFP:
4827 str = "UFP";
4828 break;
4829 case vNIC2:
4830 str = "vNIC-2";
4831 break;
4832 default:
4833 str = "";
4834 }
4835
4836 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004837}
4838
/* Printable label for the function type: physical or virtual. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4843
/* PCI probe callback: bring up a single BE/Lancer NIC function.
 * Sequence: enable PCI resources -> allocate netdev/adapter -> set DMA
 * mask -> map BARs and mailbox (be_ctrl_init) -> sync with FW -> optional
 * function reset -> FW init -> stats/config -> be_setup -> register netdev.
 * Errors unwind through the goto-cleanup ladder in reverse order of
 * acquisition. Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* One netdev carries all TX/RX queues; adapter is its private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: failure here is not fatal to the probe */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* Skip the FLR when VFs are already enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Kick off the 1s error-detection/recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	/* Best-effort query; port_name is only used for the banner below */
	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4965
4966static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4967{
4968 struct be_adapter *adapter = pci_get_drvdata(pdev);
4969 struct net_device *netdev = adapter->netdev;
4970
Suresh Reddy76a9e082014-01-15 13:23:40 +05304971 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004972 be_setup_wol(adapter, true);
4973
Ajit Khaparded4360d62013-11-22 12:51:09 -06004974 be_intr_set(adapter, false);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004975 cancel_delayed_work_sync(&adapter->func_recovery_work);
4976
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004977 netif_device_detach(netdev);
4978 if (netif_running(netdev)) {
4979 rtnl_lock();
4980 be_close(netdev);
4981 rtnl_unlock();
4982 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004983 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004984
4985 pci_save_state(pdev);
4986 pci_disable_device(pdev);
4987 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4988 return 0;
4989}
4990
4991static int be_resume(struct pci_dev *pdev)
4992{
4993 int status = 0;
4994 struct be_adapter *adapter = pci_get_drvdata(pdev);
4995 struct net_device *netdev = adapter->netdev;
4996
4997 netif_device_detach(netdev);
4998
4999 status = pci_enable_device(pdev);
5000 if (status)
5001 return status;
5002
Yijing Wang1ca01512013-06-27 20:53:42 +08005003 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005004 pci_restore_state(pdev);
5005
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05305006 status = be_fw_wait_ready(adapter);
5007 if (status)
5008 return status;
5009
Ajit Khaparded4360d62013-11-22 12:51:09 -06005010 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005011 /* tell fw we're ready to fire cmds */
5012 status = be_cmd_fw_init(adapter);
5013 if (status)
5014 return status;
5015
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00005016 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005017 if (netif_running(netdev)) {
5018 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005019 be_open(netdev);
5020 rtnl_unlock();
5021 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005022
5023 schedule_delayed_work(&adapter->func_recovery_work,
5024 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005025 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005026
Suresh Reddy76a9e082014-01-15 13:23:40 +05305027 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005028 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005029
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005030 return 0;
5031}
5032
Sathya Perla82456b02010-02-17 01:35:37 +00005033/*
5034 * An FLR will stop BE from DMAing any data.
5035 */
5036static void be_shutdown(struct pci_dev *pdev)
5037{
5038 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005039
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005040 if (!adapter)
5041 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005042
Devesh Sharmad114f992014-06-10 19:32:15 +05305043 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005044 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005045 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005046
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005047 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005048
Ajit Khaparde57841862011-04-06 18:08:43 +00005049 be_cmd_reset_function(adapter);
5050
Sathya Perla82456b02010-02-17 01:35:37 +00005051 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005052}
5053
Sathya Perlacf588472010-02-14 21:22:01 +00005054static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305055 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005056{
5057 struct be_adapter *adapter = pci_get_drvdata(pdev);
5058 struct net_device *netdev = adapter->netdev;
5059
5060 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5061
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005062 if (!adapter->eeh_error) {
5063 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00005064
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005065 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005066
Sathya Perlacf588472010-02-14 21:22:01 +00005067 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005068 netif_device_detach(netdev);
5069 if (netif_running(netdev))
5070 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005071 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005072
5073 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005074 }
Sathya Perlacf588472010-02-14 21:22:01 +00005075
5076 if (state == pci_channel_io_perm_failure)
5077 return PCI_ERS_RESULT_DISCONNECT;
5078
5079 pci_disable_device(pdev);
5080
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005081 /* The error could cause the FW to trigger a flash debug dump.
5082 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005083 * can cause it not to recover; wait for it to finish.
5084 * Wait only for first function as it is needed only once per
5085 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005086 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005087 if (pdev->devfn == 0)
5088 ssleep(30);
5089
Sathya Perlacf588472010-02-14 21:22:01 +00005090 return PCI_ERS_RESULT_NEED_RESET;
5091}
5092
5093static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5094{
5095 struct be_adapter *adapter = pci_get_drvdata(pdev);
5096 int status;
5097
5098 dev_info(&adapter->pdev->dev, "EEH reset\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005099
5100 status = pci_enable_device(pdev);
5101 if (status)
5102 return PCI_ERS_RESULT_DISCONNECT;
5103
5104 pci_set_master(pdev);
Yijing Wang1ca01512013-06-27 20:53:42 +08005105 pci_set_power_state(pdev, PCI_D0);
Sathya Perlacf588472010-02-14 21:22:01 +00005106 pci_restore_state(pdev);
5107
5108 /* Check if card is ok and fw is ready */
Sathya Perlac5b3ad42013-03-05 22:23:20 +00005109 dev_info(&adapter->pdev->dev,
5110 "Waiting for FW to be ready after EEH reset\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005111 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005112 if (status)
5113 return PCI_ERS_RESULT_DISCONNECT;
5114
Sathya Perlad6b6d982012-09-05 01:56:48 +00005115 pci_cleanup_aer_uncorrect_error_status(pdev);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005116 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005117 return PCI_ERS_RESULT_RECOVERED;
5118}
5119
5120static void be_eeh_resume(struct pci_dev *pdev)
5121{
5122 int status = 0;
5123 struct be_adapter *adapter = pci_get_drvdata(pdev);
5124 struct net_device *netdev = adapter->netdev;
5125
5126 dev_info(&adapter->pdev->dev, "EEH resume\n");
5127
5128 pci_save_state(pdev);
5129
Kalesh AP2d177be2013-04-28 22:22:29 +00005130 status = be_cmd_reset_function(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005131 if (status)
5132 goto err;
5133
Kalesh AP03a58ba2014-05-13 14:03:11 +05305134 /* On some BE3 FW versions, after a HW reset,
5135 * interrupts will remain disabled for each function.
5136 * So, explicitly enable interrupts
5137 */
5138 be_intr_set(adapter, true);
5139
Kalesh AP2d177be2013-04-28 22:22:29 +00005140 /* tell fw we're ready to fire cmds */
5141 status = be_cmd_fw_init(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005142 if (status)
5143 goto err;
5144
Sathya Perlacf588472010-02-14 21:22:01 +00005145 status = be_setup(adapter);
5146 if (status)
5147 goto err;
5148
5149 if (netif_running(netdev)) {
5150 status = be_open(netdev);
5151 if (status)
5152 goto err;
5153 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005154
5155 schedule_delayed_work(&adapter->func_recovery_work,
5156 msecs_to_jiffies(1000));
Sathya Perlacf588472010-02-14 21:22:01 +00005157 netif_device_attach(netdev);
5158 return;
5159err:
5160 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005161}
5162
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5168
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * error-recovery callbacks for all supported BE/Lancer device IDs.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5179
5180static int __init be_init_module(void)
5181{
Joe Perches8e95a202009-12-03 07:58:21 +00005182 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5183 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005184 printk(KERN_WARNING DRV_NAME
5185 " : Module param rx_frag_size must be 2048/4096/8192."
5186 " Using 2048\n");
5187 rx_frag_size = 2048;
5188 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005189
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005190 return pci_register_driver(&be_driver);
5191}
5192module_init(be_init_module);
5193
/* Module exit point: unregister the PCI driver (triggers be_remove for
 * every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);