blob: ad2b5094a498f2b53be8700e52b330ffba5e364a [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla11ac75e2011-12-13 00:58:50 +000037static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Benoit Taine9baa3c32014-08-08 15:56:03 +020041static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000053/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070054static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000055 "CEV",
56 "CTX",
57 "DBUF",
58 "ERX",
59 "Host",
60 "MPU",
61 "NDMA",
62 "PTC ",
63 "RDMA ",
64 "RXF ",
65 "RXIPS ",
66 "RXULP0 ",
67 "RXULP1 ",
68 "RXULP2 ",
69 "TIM ",
70 "TPOST ",
71 "TPRE ",
72 "TXIPS ",
73 "TXULP0 ",
74 "TXULP1 ",
75 "UC ",
76 "WDMA ",
77 "TXULP2 ",
78 "HOST1 ",
79 "P0_OB_LINK ",
80 "P1_OB_LINK ",
81 "HOST_GPIO ",
82 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053083 "ERX2 ",
84 "SPARE ",
85 "JTAG ",
86 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000087};
Kalesh APe2fb1af2014-09-19 15:46:58 +053088
Ajit Khaparde7c185272010-07-29 06:16:33 +000089/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070090static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000091 "LPCMEMHOST",
92 "MGMT_MAC",
93 "PCS0ONLINE",
94 "MPU_IRAM",
95 "PCS1ONLINE",
96 "PCTL0",
97 "PCTL1",
98 "PMEM",
99 "RR",
100 "TXPB",
101 "RXPP",
102 "XAUI",
103 "TXP",
104 "ARM",
105 "IPC",
106 "HOST2",
107 "HOST3",
108 "HOST4",
109 "HOST5",
110 "HOST6",
111 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530112 "ECRC",
113 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700114 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "PERIPH",
116 "LLTXULP",
117 "D2P",
118 "RCON",
119 "LDMA",
120 "LLTXP",
121 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000122 "Unknown"
123};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530128
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530190
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191 val |= qid & DB_RQ_RING_ID_MASK;
192 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000198static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
199 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700200{
201 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530202
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000203 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700204 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000205
206 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000207 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208}
209
Sathya Perla8788fdc2009-07-27 22:52:03 +0000210static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530211 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212{
213 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530214
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000217
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000218 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000219 return;
220
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700221 if (arm)
222 val |= 1 << DB_EQ_REARM_SHIFT;
223 if (clear_int)
224 val |= 1 << DB_EQ_CLR_SHIFT;
225 val |= 1 << DB_EQ_EVNT_SHIFT;
226 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228}
229
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231{
232 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530233
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
/* ndo_set_mac_address handler.
 *
 * Programs the requested MAC into the FW (PMAC_ADD), deletes the previously
 * programmed MAC, then reads back the active MAC from FW to decide whether
 * the change actually took effect. The read-back is required because on VFs
 * the PMAC_ADD command may legitimately fail (no FILTMGMT privilege) while
 * the PF has already provisioned the MAC on the VF's behalf.
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM when
 * FW reports the MAC did not change, or the FW command status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
307
Sathya Perlaca34fe32012-11-06 17:48:56 +0000308/* BE2 supports only v0 cmd */
309static void *hw_stats_from_cmd(struct be_adapter *adapter)
310{
311 if (BE2_chip(adapter)) {
312 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000316 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500319 } else {
320 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
321
322 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323 }
324}
325
326/* BE2 supports only v0 cmd */
327static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
328{
329 if (BE2_chip(adapter)) {
330 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500337 } else {
338 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
339
340 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000341 }
342}
343
/* Copy the v0-layout (BE2) HW stats into the adapter's driver stats.
 * The command buffer is byte-swapped in place from LE before the copy.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW keeps address and VLAN filter drops apart; fold together */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters live per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
392
/* Copy the v1-layout (BE3) HW stats into the adapter's driver stats.
 * The command buffer is byte-swapped in place from LE before the copy.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 HW reports a single combined filter-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
438
/* Copy the v2-layout (Skyhawk and later) HW stats into the adapter's
 * driver stats, including RoCE counters when RoCE is supported.
 * The command buffer is byte-swapped in place from LE before the copy.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters exist only in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
492
/* Copy Lancer per-port (pport) stats into the adapter's driver stats.
 * Lancer exposes 64-bit counters; only the low 32 bits (the *_lo fields)
 * are carried into the 32-bit driver counters.
 * The command buffer is byte-swapped in place from LE before the copy.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and VLAN filter drops into one driver counter */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000529
Sathya Perla09c1c682011-08-22 19:41:53 +0000530static void accumulate_16bit_val(u32 *acc, u16 val)
531{
532#define lo(x) (x & 0xFFFF)
533#define hi(x) (x & 0xFFFF0000)
534 bool wrapped = val < lo(*acc);
535 u32 newacc = hi(*acc) + val;
536
537 if (wrapped)
538 newacc += 65536;
539 ACCESS_ONCE(*acc) = newacc;
540}
541
Jingoo Han4188e7d2013-08-05 18:02:02 +0900542static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530543 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000544{
545 if (!BEx_chip(adapter))
546 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
547 else
548 /* below erx HW counter can actually wrap around after
549 * 65535. Driver accumulates a 32-bit value
550 */
551 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
552 (u16)erx_stat);
553}
554
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000555void be_parse_stats(struct be_adapter *adapter)
556{
Ajit Khaparde61000862013-10-03 16:16:33 -0500557 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558 struct be_rx_obj *rxo;
559 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000560 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000561
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (lancer_chip(adapter)) {
563 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000564 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000565 if (BE2_chip(adapter))
566 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else if (BE3_chip(adapter))
568 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000569 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 else
571 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000572
Ajit Khaparde61000862013-10-03 16:16:33 -0500573 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000575 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
576 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000578 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000579}
580
/* ndo_get_stats64 handler: aggregate per-queue SW stats and FW-reported
 * error counters into @stats. Per-queue counters are read under the
 * u64_stats seqcount so 64-bit values are consistent on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	/* Sum RX packet/byte counts across all RX queues */
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* Retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	/* Sum TX packet/byte counts across all TX queues */
	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
648
/* Propagate a FW-reported link state (@link_status non-zero = up) to the
 * net device's carrier state. On the very first invocation the carrier is
 * forced off so the stack starts from a known state before the real
 * status is applied.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* one-time init: begin from carrier-off before applying real state */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
663
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500664static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla3c8def92011-06-12 20:01:58 +0000666 struct be_tx_stats *stats = tx_stats(txo);
667
Sathya Perlaab1594e2011-07-25 19:10:15 +0000668 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000669 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500670 stats->tx_bytes += skb->len;
671 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500675/* Returns number of WRBs needed for the skb */
676static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500678 /* +1 for the header wrb */
679 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
682static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
683{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500684 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
685 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
686 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
687 wrb->rsvd0 = 0;
688}
689
690/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
691 * to avoid the swap and shift/mask operations in wrb_fill().
692 */
693static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
694{
695 wrb->frag_pa_hi = 0;
696 wrb->frag_pa_lo = 0;
697 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000698 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700699}
700
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000701static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530702 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000703{
704 u8 vlan_prio;
705 u16 vlan_tag;
706
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100707 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
709 /* If vlan priority provided by OS is NOT in available bmap */
710 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
711 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
712 adapter->recommended_prio;
713
714 return vlan_tag;
715}
716
Sathya Perlac9c47142014-03-27 10:46:19 +0530717/* Used only for IP tunnel packets */
718static u16 skb_inner_ip_proto(struct sk_buff *skb)
719{
720 return (inner_ip_hdr(skb)->version == 4) ?
721 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
722}
723
724static u16 skb_ip_proto(struct sk_buff *skb)
725{
726 return (ip_hdr(skb)->version == 4) ?
727 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
728}
729
/* Build the TX header WRB for @skb: programs LSO, checksum-offload and
 * VLAN-insertion bits plus the WRB count and total length. @skip_hw_vlan
 * requests the FW workaround that suppresses HW VLAN tagging.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO request: pass the MSS; lso6 for IPv6 (not on Lancer) */
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* csum offload: for encapsulated pkts csum the inner hdrs */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
773
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530775 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000776{
777 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500778 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000779
Sathya Perla7101e112010-03-22 20:41:12 +0000780
Sathya Perlaf986afc2015-02-06 08:18:43 -0500781 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
782 (u64)le32_to_cpu(wrb->frag_pa_lo);
783 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500785 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000786 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500787 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000788 }
789}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700790
/* Returns the number of WRBs used up by the skb, or 0 on DMA-mapping
 * failure. Posts one header WRB plus one WRB per mapped buffer (linear
 * head + page frags) onto @txo's queue and records the skb for completion
 * processing. On failure all mappings made so far are undone and the
 * queue head is restored, leaving the ring exactly as it was on entry.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;	/* saved for rollback and skb bookkeeping */

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	/* Map and post the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		queue_head_inc(txq);
		copied += len;
	}

	/* Map and post each page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Remember the skb at the header-WRB index for TX completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* Walk forward over the WRBs posted above, unmapping each; only the
	 * first mapped buffer (if any) was a single mapping.
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}
864
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500865static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
866{
867 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
868}
869
/* Insert VLAN tag(s) directly into the packet data (SW tagging) instead
 * of relying on HW insertion. Handles the inner tag (from the skb or the
 * pvid when the QnQ async event was received) and the outer QnQ tag.
 * Returns the (possibly reallocated) skb, or NULL if tag insertion
 * failed; may set *skip_hw_vlan to tell the FW to skip HW tagging.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* Packet may be shared; get a private copy before modifying it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the payload; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
914
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000915static bool be_ipv6_exthdr_check(struct sk_buff *skb)
916{
917 struct ethhdr *eh = (struct ethhdr *)skb->data;
918 u16 offset = ETH_HLEN;
919
920 if (eh->h_proto == htons(ETH_P_IPV6)) {
921 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
922
923 offset += sizeof(struct ipv6hdr);
924 if (ip6h->nexthdr != NEXTHDR_TCP &&
925 ip6h->nexthdr != NEXTHDR_UDP) {
926 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530927 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000928
929 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
930 if (ehdr->hdrlen == 0xff)
931 return true;
932 }
933 }
934 return false;
935}
936
937static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
938{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100939 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000940}
941
Sathya Perla748b5392014-05-09 13:29:13 +0530942static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000943{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000944 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000945}
946
/* Apply the BEx/Lancer TX HW-bug workarounds to @skb: trim padded IPv4
 * pkts, switch to SW VLAN tagging where HW tagging misbehaves, and drop
 * pkts that would lock up the HW. Returns the (possibly reallocated)
 * skb, or NULL if the pkt was dropped/freed; may set *skip_hw_vlan.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the pad off short IPv4 pkts.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1014
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301015static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1016 struct sk_buff *skb,
1017 bool *skip_hw_vlan)
1018{
1019 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1020 * less may cause a transmit stall on that port. So the work-around is
1021 * to pad short packets (<= 32 bytes) to a 36-byte length.
1022 */
1023 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001024 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301025 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301026 }
1027
1028 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1029 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1030 if (!skb)
1031 return NULL;
1032 }
1033
1034 return skb;
1035}
1036
/* Ring the TX doorbell for all WRBs queued on @txo since the last flush.
 * Ensures the last request will raise a completion event, and (on BEx)
 * pads the batch with a dummy WRB so an even number of WRBs is notified.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* re-write the last request's wrb count to include the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1060
/* ndo_start_xmit handler: apply HW workarounds, enqueue the skb's WRBs,
 * throttle the subqueue when the ring is nearly full, and ring the
 * doorbell unless more pkts are expected (skb->xmit_more batching).
 * Always returns NETDEV_TX_OK; dropped pkts are freed and counted.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	/* workarounds may reallocate, SW-tag or drop (NULL) the skb */
	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	/* 0 means DMA mapping failed; the skb must be freed here */
	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* Stop the subqueue if the next max-sized request might not fit */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1097
1098static int be_change_mtu(struct net_device *netdev, int new_mtu)
1099{
1100 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301101 struct device *dev = &adapter->pdev->dev;
1102
1103 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1104 dev_info(dev, "MTU must be between %d and %d bytes\n",
1105 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001106 return -EINVAL;
1107 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301108
1109 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301110 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001111 netdev->mtu = new_mtu;
1112 return 0;
1113}
1114
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001115static inline bool be_in_all_promisc(struct be_adapter *adapter)
1116{
1117 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1118 BE_IF_FLAGS_ALL_PROMISCUOUS;
1119}
1120
1121static int be_set_vlan_promisc(struct be_adapter *adapter)
1122{
1123 struct device *dev = &adapter->pdev->dev;
1124 int status;
1125
1126 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1127 return 0;
1128
1129 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1130 if (!status) {
1131 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1132 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1133 } else {
1134 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1135 }
1136 return status;
1137}
1138
1139static int be_clear_vlan_promisc(struct be_adapter *adapter)
1140{
1141 struct device *dev = &adapter->pdev->dev;
1142 int status;
1143
1144 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1145 if (!status) {
1146 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1147 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1148 }
1149 return status;
1150}
1151
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* More vids than the HW filter supports: fall back to vlan-promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* filter programmed OK; leave vlan-promisc if we were in it */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1186
Patrick McHardy80d5c362013-04-19 02:04:28 +00001187static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001188{
1189 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001190 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001191
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001192 /* Packets with VID 0 are always received by Lancer by default */
1193 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301194 return status;
1195
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301196 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301197 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001198
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301199 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301200 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001201
Somnath Kotura6b74e02014-01-21 15:50:55 +05301202 status = be_vid_config(adapter);
1203 if (status) {
1204 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301205 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301206 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301207
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001208 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001209}
1210
/* ndo_vlan_rx_kill_vid handler: drop @vid from the driver's vid bitmap
 * and re-program the HW VLAN table.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}
1224
/* Disable full promiscuous mode on the interface. The FW command status
 * is intentionally ignored (best-effort); the SW flag is always cleared.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1230
/* Enable full promiscuous mode on the interface. The FW command status
 * is intentionally ignored (best-effort); the SW flag is always set.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1236
1237static void be_set_mc_promisc(struct be_adapter *adapter)
1238{
1239 int status;
1240
1241 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1242 return;
1243
1244 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1245 if (!status)
1246 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1247}
1248
1249static void be_set_mc_list(struct be_adapter *adapter)
1250{
1251 int status;
1252
1253 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1254 if (!status)
1255 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1256 else
1257 be_set_mc_promisc(adapter);
1258}
1259
/* Re-program the interface's unicast MAC filters from the netdev's UC
 * address list. Slot 0 of pmac_id[] belongs to the primary MAC and is
 * never touched here. If the list exceeds HW capacity, fall back to
 * all-promiscuous mode instead.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete previously programmed UC MACs (slots 1..uc_macs),
	 * resetting uc_macs to 0 as we go.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* uc_macs is incremented before use so pmac_id[0] stays reserved
	 * for the primary MAC.
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		/* NOTE(review): pmac_add status is ignored here; a failed
		 * add leaves uc_macs out of sync with HW — confirm intended.
		 */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1280
1281static void be_clear_uc_list(struct be_adapter *adapter)
1282{
1283 int i;
1284
1285 for (i = 1; i < (adapter->uc_macs + 1); i++)
1286 be_cmd_pmac_del(adapter, adapter->if_handle,
1287 adapter->pmac_id[i], 0);
1288 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301289}
1290
/* ndo_set_rx_mode handler: sync HW RX filtering (promisc, multicast,
 * unicast) with the netdev flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Re-program the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program unicast filters only when the list size changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1319
/* ndo_set_vf_mac handler: program @mac as VF @vf's MAC address.
 * BEx chips require a PMAC delete+add on the VF's interface; newer
 * chips use the SET_MAC firmware command. Returns 0 on success,
 * -EPERM/-EINVAL on bad input, or the translated firmware status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* Old MAC must be removed before the new one can be added */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC only after the firmware accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1359
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001360static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301361 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001362{
1363 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001364 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001365
Sathya Perla11ac75e2011-12-13 00:58:50 +00001366 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001367 return -EPERM;
1368
Sathya Perla11ac75e2011-12-13 00:58:50 +00001369 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001370 return -EINVAL;
1371
1372 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001373 vi->max_tx_rate = vf_cfg->tx_rate;
1374 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001375 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1376 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001377 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301378 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001379
1380 return 0;
1381}
1382
/* Enable Transparent VLAN Tagging (TVT) with @vlan on VF @vf, clear any
 * guest-programmed VLAN filters, and revoke the VF's filter-management
 * privilege so it cannot re-program them while TVT is active.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		/* Cached privileges track HW only on command success */
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1411
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * privilege to manage its own VLAN filters.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		/* Cached privileges track HW only on command success */
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	/* The old transparent tag persists until the VM bounces the i/f */
	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1438
/* ndo_set_vf_vlan handler: a non-zero vlan/qos enables transparent VLAN
 * tagging on VF @vf; vlan==0 && qos==0 disables it. The combined
 * tag (VID | PCP) is cached in vf_cfg->vlan_tag on success.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	/* VID must fit in 12 bits, priority in 3 bits */
	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Fold the priority bits into the tag before programming */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
1468
/* ndo_set_vf_rate handler: set VF @vf's max TX rate in Mbps.
 * min_tx_rate is not supported. max_tx_rate==0 removes any limit
 * (programmed directly, skipping the link-speed validation).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* HW cannot enforce a minimum rate */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 == no limit; no validation against link speed needed */
	if (!max_tx_rate)
		goto config_qos;

	/* The requested rate is validated against the current link speed */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the rate only after the firmware accepted it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301530
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301531static int be_set_vf_link_state(struct net_device *netdev, int vf,
1532 int link_state)
1533{
1534 struct be_adapter *adapter = netdev_priv(netdev);
1535 int status;
1536
1537 if (!sriov_enabled(adapter))
1538 return -EPERM;
1539
1540 if (vf >= adapter->num_vfs)
1541 return -EINVAL;
1542
1543 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301544 if (status) {
1545 dev_err(&adapter->pdev->dev,
1546 "Link state change on VF %d failed: %#x\n", vf, status);
1547 return be_cmd_status(status);
1548 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301549
Kalesh APabccf232014-07-17 16:20:24 +05301550 adapter->vf_cfg[vf].plink_tracking = link_state;
1551
1552 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301553}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001554
Sathya Perla2632baf2013-10-01 16:00:00 +05301555static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1556 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557{
Sathya Perla2632baf2013-10-01 16:00:00 +05301558 aic->rx_pkts_prev = rx_pkts;
1559 aic->tx_reqs_prev = tx_pkts;
1560 aic->jiffies = now;
1561}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001562
/* Adaptive interrupt coalescing: for every event queue, derive a new
 * EQ delay (eqd) from the RX+TX packet rate since the last pass, and
 * batch-program all EQs whose delay changed via one firmware command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		/* Adaptive mode off: fall back to the static (et_eqd) value */
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* u64_stats retry loops give a consistent counter snapshot
		 * without locking against the datapath updaters.
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined RX+TX packets-per-second since the last pass */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Very low rates get no delay; otherwise clamp to [min, max] */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a firmware update only if the delay changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1628
/* Account one RX completion (@rxcp) into the per-queue stats of @rxo,
 * inside a u64_stats update section so readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1644
Sathya Perla2e588f82011-03-11 02:49:26 +00001645static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001646{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001647 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301648 * Also ignore ipcksm for ipv6 pkts
1649 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001650 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301651 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001652}
1653
/* Pop the RX page_info at the RX queue tail and release its DMA mapping.
 * The fragment marked last_frag owns the full-page mapping and unmaps
 * it; earlier fragments of the same page only need a CPU sync of their
 * rx_frag_size slice. The queue tail is advanced and 'used' decremented.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1679
1680/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001681static void be_rx_compl_discard(struct be_rx_obj *rxo,
1682 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001685 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001687 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301688 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001689 put_page(page_info->page);
1690 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001691 }
1692}
1693
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment is (partly) copied into the
 * skb's linear area, the rest are attached as page fragments, with
 * consecutive fragments of the same physical page coalesced per slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area; the
		 * payload stays in the page and becomes frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref and
			 * just grow the current slot's size below.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	/* NOTE(review): j indexes frags[], whose valid range is
	 * 0..MAX_SKB_FRAGS-1; j == MAX_SKB_FRAGS would already have written
	 * out of bounds above, so this check looks off-by-one — confirm.
	 */
	BUG_ON(j > MAX_SKB_FRAGS);
}
1768
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and free the HW buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when RXCSUM is on and the HW vouched */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1804
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	/* j is u16 and starts at (u16)-1 so the first iteration's j++
	 * wraps to slot 0.
	 */
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for HW-verified TCP, so csum is good */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1862
/* Decode a v1-format RX completion descriptor into the chip-independent
 * rxcp representation. VLAN fields are extracted only when the
 * completion carries a VLAN tag (vtp set).
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	/* v1 completions report tunneled (e.g. encapsulated) frames */
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001885
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001886static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1887 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001888{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301889 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1890 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1891 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1892 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1893 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1894 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1895 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1896 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1897 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1898 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1899 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001900 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301901 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1902 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001903 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301904 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1905 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001906}
1907
/* Return the next valid Rx completion on rxo's CQ (parsed into rxo->rxcp),
 * or NULL if none is pending. The HW entry's valid bit is cleared so each
 * completion is consumed exactly once. Also post-processes vlan info:
 * vlanf may be cleared for transparently-tagged (QnQ) or pvid-tagged pkts.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* l4 csum is not usable for IP fragments; clear it */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* BEx/Skyhawk report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port-default vlan (pvid) from the stack unless it
		 * was explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1952
Eric Dumazet1829b082011-03-01 05:48:12 +00001953static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001954{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001955 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001956
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001957 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001958 gfp |= __GFP_COMP;
1959 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960}
1961
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. Posts at most @frags_needed frags; stops early if
 * the RXQ has no free slot or page allocation/DMA-mapping fails.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* A non-NULL page_info->page means the slot still holds a buffer
	 * owned by HW; stop posting when we catch up with such a slot.
	 */
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page and carve frags off it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next frag in the same page: take an extra ref so
			 * the page lives until every frag is reaped.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the HW rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* Store the page's DMA addr on its last frag so the
			 * whole page can be unmapped when it is reaped.
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks; it takes a bounded count */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2044
/* Return the next valid Tx completion on @tx_cq (byte-order converted),
 * or NULL if none is pending. The entry's valid bit is cleared so each
 * completion is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
2060
/* Walk the txq from its tail up to @last_index, unmapping every wrb and
 * freeing the skb(s) of the completed Tx request(s). Returns the number of
 * wrbs (hdr + frag) consumed so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL sent_skbs[] slot marks the hdr wrb of a new
		 * request; the slot's skb is freed once its wrbs are done.
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag wrb after the hdr also maps the skb
		 * linear header; unmap it only on that wrb.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last processed request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2094
/* Return the number of events in the event queue.
 * Each consumed EQ entry has its evt word cleared so it is counted once.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Touch the entry only after its evt word is seen non-zero */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2114
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002115/* Leaves the EQ is disarmed state */
2116static void be_eq_clean(struct be_eq_obj *eqo)
2117{
2118 int num = events_get(eqo);
2119
2120 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2121}
2122
/* Drain the RX CQ and free all posted-but-unconsumed rx buffers; used on
 * rx teardown. Leaves the CQ unarmed and the RXQ empty with reset indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a dead HW */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2172
/* Reap all outstanding Tx completions on every txq, then free any wrbs that
 * were enqueued but never notified to HW. Used on tx teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart silence timer */
				timeo = 0;
			}
			/* Only un-notified wrbs (if any) may remain */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2237
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002238static void be_evt_queues_destroy(struct be_adapter *adapter)
2239{
2240 struct be_eq_obj *eqo;
2241 int i;
2242
2243 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002244 if (eqo->q.created) {
2245 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002246 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302247 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302248 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002249 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002250 be_queue_free(adapter, &eqo->q);
2251 }
2252}
2253
2254static int be_evt_queues_create(struct be_adapter *adapter)
2255{
2256 struct be_queue_info *eq;
2257 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302258 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002259 int i, rc;
2260
Sathya Perla92bf14a2013-08-27 16:57:32 +05302261 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2262 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002263
2264 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302265 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2266 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302267 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302268 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002269 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002270 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302271 aic->max_eqd = BE_MAX_EQD;
2272 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002273
2274 eq = &eqo->q;
2275 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302276 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002277 if (rc)
2278 return rc;
2279
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302280 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002281 if (rc)
2282 return rc;
2283 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002284 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002285}
2286
Sathya Perla5fb379e2009-06-18 00:02:59 +00002287static void be_mcc_queues_destroy(struct be_adapter *adapter)
2288{
2289 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002290
Sathya Perla8788fdc2009-07-27 22:52:03 +00002291 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002292 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002293 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002294 be_queue_free(adapter, q);
2295
Sathya Perla8788fdc2009-07-27 22:52:03 +00002296 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002297 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002298 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002299 be_queue_free(adapter, q);
2300}
2301
2302/* Must be called only after TX qs are created as MCC shares TX EQ */
2303static int be_mcc_queues_create(struct be_adapter *adapter)
2304{
2305 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002306
Sathya Perla8788fdc2009-07-27 22:52:03 +00002307 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002308 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302309 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002310 goto err;
2311
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002312 /* Use the default EQ for MCC completions */
2313 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002314 goto mcc_cq_free;
2315
Sathya Perla8788fdc2009-07-27 22:52:03 +00002316 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002317 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2318 goto mcc_cq_destroy;
2319
Sathya Perla8788fdc2009-07-27 22:52:03 +00002320 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002321 goto mcc_q_free;
2322
2323 return 0;
2324
2325mcc_q_free:
2326 be_queue_free(adapter, q);
2327mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002328 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002329mcc_cq_free:
2330 be_queue_free(adapter, cq);
2331err:
2332 return -1;
2333}
2334
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002335static void be_tx_queues_destroy(struct be_adapter *adapter)
2336{
2337 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002338 struct be_tx_obj *txo;
2339 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002340
Sathya Perla3c8def92011-06-12 20:01:58 +00002341 for_all_tx_queues(adapter, txo, i) {
2342 q = &txo->q;
2343 if (q->created)
2344 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2345 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002346
Sathya Perla3c8def92011-06-12 20:01:58 +00002347 q = &txo->cq;
2348 if (q->created)
2349 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2350 be_queue_free(adapter, q);
2351 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002352}
2353
Sathya Perla77071332013-08-27 16:57:34 +05302354static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002355{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002356 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002357 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302358 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359
Sathya Perla92bf14a2013-08-27 16:57:32 +05302360 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002361
Sathya Perla3c8def92011-06-12 20:01:58 +00002362 for_all_tx_queues(adapter, txo, i) {
2363 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002364 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2365 sizeof(struct be_eth_tx_compl));
2366 if (status)
2367 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002368
John Stultz827da442013-10-07 15:51:58 -07002369 u64_stats_init(&txo->stats.sync);
2370 u64_stats_init(&txo->stats.sync_compl);
2371
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372 /* If num_evt_qs is less than num_tx_qs, then more than
2373 * one txq share an eq
2374 */
2375 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2376 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2377 if (status)
2378 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002379
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002380 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2381 sizeof(struct be_eth_wrb));
2382 if (status)
2383 return status;
2384
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002385 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002386 if (status)
2387 return status;
2388 }
2389
Sathya Perlad3791422012-09-28 04:39:44 +00002390 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2391 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002392 return 0;
2393}
2394
2395static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002396{
2397 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002398 struct be_rx_obj *rxo;
2399 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002400
Sathya Perla3abcded2010-10-03 22:12:27 -07002401 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002402 q = &rxo->cq;
2403 if (q->created)
2404 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2405 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002406 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002407}
2408
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002409static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002410{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002411 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002412 struct be_rx_obj *rxo;
2413 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002414
Sathya Perla92bf14a2013-08-27 16:57:32 +05302415 /* We can create as many RSS rings as there are EQs. */
2416 adapter->num_rx_qs = adapter->num_evt_qs;
2417
2418 /* We'll use RSS only if atleast 2 RSS rings are supported.
2419 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002420 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302421 if (adapter->num_rx_qs > 1)
2422 adapter->num_rx_qs++;
2423
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002424 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002425 for_all_rx_queues(adapter, rxo, i) {
2426 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002427 cq = &rxo->cq;
2428 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302429 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002430 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002431 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002432
John Stultz827da442013-10-07 15:51:58 -07002433 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002434 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2435 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002436 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002437 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002438 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002439
Sathya Perlad3791422012-09-28 04:39:44 +00002440 dev_info(&adapter->pdev->dev,
2441 "created %d RSS queue(s) and 1 default RX queue\n",
2442 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002443 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002444}
2445
/* INTx interrupt handler: schedules NAPI for the default EQ and notifies
 * the consumed events; distinguishes valid from spurious interrupts.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2477
/* MSI-x interrupt handler: hand the EQ over to NAPI.
 * NOTE(review): be_eq_notify() is called with arm=false here — presumably
 * the EQ stays unarmed while NAPI polls and is re-armed by the poll routine;
 * confirm against be_poll().
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2486
Sathya Perla2e588f82011-03-11 02:49:26 +00002487static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002488{
Somnath Koture38b1702013-05-29 22:55:56 +00002489 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002490}
2491
/* Consume up to @budget Rx completions on @rxo, delivering packets to the
 * stack (GRO or regular path). @polling tells whether we were invoked from
 * busy-poll (GRO is skipped then). Returns the number of compls processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* ack the processed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2551
Kalesh AP512bb8a2014-09-02 09:56:49 +05302552static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2553{
2554 switch (status) {
2555 case BE_TX_COMP_HDR_PARSE_ERR:
2556 tx_stats(txo)->tx_hdr_parse_err++;
2557 break;
2558 case BE_TX_COMP_NDMA_ERR:
2559 tx_stats(txo)->tx_dma_err++;
2560 break;
2561 case BE_TX_COMP_ACL_ERR:
2562 tx_stats(txo)->tx_spoof_check_err++;
2563 break;
2564 }
2565}
2566
2567static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2568{
2569 switch (status) {
2570 case LANCER_TX_COMP_LSO_ERR:
2571 tx_stats(txo)->tx_tso_err++;
2572 break;
2573 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2574 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2575 tx_stats(txo)->tx_spoof_check_err++;
2576 break;
2577 case LANCER_TX_COMP_QINQ_ERR:
2578 tx_stats(txo)->tx_qinq_err++;
2579 break;
2580 case LANCER_TX_COMP_PARITY_ERR:
2581 tx_stats(txo)->tx_internal_parity_err++;
2582 break;
2583 case LANCER_TX_COMP_DMA_ERR:
2584 tx_stats(txo)->tx_dma_err++;
2585 break;
2586 }
2587}
2588
/* Reap all pending TX completions for @txo (TX queue index @idx),
 * free the completed wrbs/skbs, record per-chip error stats, and wake
 * the netdev subqueue if it was stopped for lack of wrbs.
 * Called from be_poll() for every TX queue on the event queue.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			/* Status encodings differ per chip family */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002627
#ifdef CONFIG_NET_RX_BUSY_POLL
/* NAPI vs busy-poll arbitration: eqo->state tracks which context owns
 * the event queue's RX processing. The "lock" helpers return false when
 * the other context already holds it (recording a *_YIELD flag), so
 * callers back off instead of spinning.
 */

/* Try to claim the EQ for NAPI processing; returns false if busy-poll
 * currently owns it.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		/* NAPI should never race with itself on one EQ */
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership of the EQ */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling; returns false if NAPI owns it.
 * Runs in process context, hence the _bh lock variant.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership of the EQ */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ lock/state; called when the interface opens */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Quiesce busy-polling on this EQ; called on interface close */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support, NAPI always "owns" the EQ ... */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

/* ... and busy-poll can never acquire it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2727
/* NAPI poll handler for one event queue: reaps TX completions, then RX
 * (if busy-poll doesn't own the EQ), services the MCC queue on the MCC
 * EQ, and re-arms or keeps polling based on the work done vs @budget.
 * Returns the RX work done, as NAPI requires.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* Count events now; they are acked in the be_eq_notify() below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ: report a full budget so NAPI
		 * schedules us again instead of completing.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* Done: complete NAPI and re-arm the EQ interrupt */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2767
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll entry point (ndo_busy_poll): processes a small
 * batch (up to 4 completions) from the first RX queue on this EQ that
 * has work. Returns LL_FLUSH_BUSY if NAPI currently owns the EQ,
 * otherwise the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2789
/* Poll the adapter's error registers and log any fatal condition.
 * Lancer chips report errors via SLIPORT registers; other chips expose
 * Unrecoverable Error (UE) bitmaps in PCI config space. On a detected
 * error the carrier is turned off; hw_error is set only where the
 * hardware reliably reports (Lancer always, Skyhawk on UE).
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Nothing more to do once an error has already been flagged */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2863
Sathya Perla8d56ff12009-11-22 22:02:26 +00002864static void be_msix_disable(struct be_adapter *adapter)
2865{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002866 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002867 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002868 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302869 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002870 }
2871}
2872
/* Enable MSI-X for the adapter. Requests between MIN_MSIX_VECTORS and
 * the computed maximum; when RoCE is supported, half of the granted
 * vectors are reserved for RoCE. Returns 0 on success. On failure,
 * returns 0 for PFs (so probe falls back to INTx) and the negative
 * error for VFs (which have no INTx support).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, but never fewer than
	 * MIN_MSIX_VECTORS; negative return means total failure.
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2916
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002917static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302918 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002919{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302920 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002921}
2922
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs already requested (walking back through
 * the eq_obj array), logs a warning, disables MSI-X, and returns the
 * request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the vectors that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2946
/* Register the adapter's interrupt handler(s): MSI-X when enabled,
 * falling back to a shared INTx line (PF only) if MSI-X registration
 * fails. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2974
2975static void be_irq_unregister(struct be_adapter *adapter)
2976{
2977 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002978 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002979 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002980
2981 if (!adapter->isr_registered)
2982 return;
2983
2984 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002985 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002986 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002987 goto done;
2988 }
2989
2990 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002991 for_all_evt_queues(adapter, eqo, i)
2992 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002993
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002994done:
2995 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002996}
2997
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002998static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002999{
3000 struct be_queue_info *q;
3001 struct be_rx_obj *rxo;
3002 int i;
3003
3004 for_all_rx_queues(adapter, rxo, i) {
3005 q = &rxo->q;
3006 if (q->created) {
3007 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003008 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003009 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003010 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003011 }
3012}
3013
/* ndo_stop handler: quiesce and tear down the data path in order —
 * RoCE close, NAPI/busy-poll disable, MCC async disable, TX drain,
 * RX queue destruction, EQ cleanup, and finally IRQ release.
 * The ordering here is deliberate; do not reorder steps.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no in-flight interrupt handler still touches
		 * the EQ before cleaning it.
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3059
/* Allocate and create all RX queues, program the RSS indirection table
 * and hash key (or disable RSS when only the default RXQ exists), and
 * post the initial set of RX buffers. Returns 0 or a negative error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * across all RSS_INDIR_TABLE_LEN slots.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not available on BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Program a random RSS hash key; keep a copy for ethtool queries */
	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3125
/* ndo_open handler: bring up the data path — create RX queues,
 * register IRQs, arm CQs/EQs, enable NAPI and busy-poll, report link
 * state, start the TX queues and RoCE. On any failure, be_close()
 * unwinds whatever was set up. Returns 0 or -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn the currently-offloaded VxLAN UDP ports from the stack */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3175
/* Enable or disable magic-packet Wake-on-LAN. Allocates a DMA buffer
 * for the firmware command, programs the PM control register and the
 * wake MAC (the netdev address on enable, all-zero on disable), and
 * sets the PCI wake capability for D3hot/D3cold accordingly.
 * Returns 0 or a negative error.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* All-zero MAC is used to clear the WoL filter on disable */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3215
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003216static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3217{
3218 u32 addr;
3219
3220 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3221
3222 mac[5] = (u8)(addr & 0xFF);
3223 mac[4] = (u8)((addr >> 8) & 0xFF);
3224 mac[3] = (u8)((addr >> 16) & 0xFF);
3225 /* Use the OUI from the current MAC address */
3226 memcpy(mac, adapter->netdev->dev_addr, 3);
3227}
3228
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last programming attempt; failures for
 * individual VFs are logged but do not stop the loop.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips set the MAC
		 * directly (vf + 1 is the FW's VF domain id).
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address after the seed */
		mac[5] += 1;
	}
	return status;
}
3264
Sathya Perla4c876612013-02-03 20:30:11 +00003265static int be_vfs_mac_query(struct be_adapter *adapter)
3266{
3267 int status, vf;
3268 u8 mac[ETH_ALEN];
3269 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003270
3271 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303272 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3273 mac, vf_cfg->if_handle,
3274 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003275 if (status)
3276 return status;
3277 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3278 }
3279 return 0;
3280}
3281
/* Undo be_vf_setup(): disable SR-IOV and destroy the per-VF FW objects.
 * If any VF is still assigned to a VM, the VFs are left enabled and only
 * the host-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips remove the pmac entry; other chips clear the
		 * MAC via the SET_MAC command.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3310
/* Destroy all queues created by be_setup_queues(), in the reverse order
 * of their creation (MCC, RX CQs, TX queues, then event queues).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3318
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303319static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003320{
Sathya Perla191eb752012-02-23 18:50:13 +00003321 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3322 cancel_delayed_work_sync(&adapter->work);
3323 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3324 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303325}
3326
Somnath Koturb05004a2013-12-05 12:08:16 +05303327static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303328{
Somnath Koturb05004a2013-12-05 12:08:16 +05303329 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003330 be_cmd_pmac_del(adapter, adapter->if_handle,
3331 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303332 kfree(adapter->pmac_id);
3333 adapter->pmac_id = NULL;
3334 }
3335}
3336
#ifdef CONFIG_BE2NET_VXLAN
/* Revert all VxLAN offload state: convert the interface back to normal
 * (non-tunnel) mode, clear the FW vxlan port, and withdraw the
 * tunnel-offload features advertised on the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303357
/* Tear down everything established by be_setup(): worker, VFs, vxlan
 * offloads, MAC entries, the FW interface, all queues and MSI-x.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3386
Kalesh AP0700d812015-01-20 03:51:43 -05003387static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3388 u32 cap_flags, u32 vf)
3389{
3390 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003391
3392 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3393 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3394 BE_IF_FLAGS_RSS;
3395
3396 en_flags &= cap_flags;
3397
Vasundhara Volam435452a2015-03-20 06:28:23 -04003398 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003399}
3400
/* Create a FW interface for each VF. On non-BE3 chips the per-VF
 * capability flags are read from the FW profile when available, with the
 * VLAN-promiscuous capability masked out so VFs cannot enable it.
 * Returns the error of the first failed interface creation.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3433
/* Allocate and initialize the per-VF config array. if_handle/pmac_id of
 * -1 mark entries whose FW objects have not been created yet.
 * Returns -ENOMEM on allocation failure.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3450
/* Bring up SR-IOV VFs. When VFs were already enabled before the driver
 * loaded (old_vfs), the existing FW interfaces and MACs are queried and
 * reused; otherwise new interfaces are created, MACs are assigned, QoS
 * is configured and SR-IOV is enabled on the PCI device.
 * On any failure, all VF state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: pick up their if-ids and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh setup: create interfaces and assign seed MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3527
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303528/* Converting function_mode bits on BE3 to SH mc_type enums */
3529
3530static u8 be_convert_mc_type(u32 function_mode)
3531{
Suresh Reddy66064db2014-06-23 16:41:29 +05303532 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303533 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303534 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303535 return FLEX10;
3536 else if (function_mode & VNIC_MODE)
3537 return vNIC2;
3538 else if (function_mode & UMC_ENABLED)
3539 return UMC;
3540 else
3541 return MC_NONE;
3542}
3543
/* On BE2/BE3 FW does not suggest the supported limits: derive the
 * per-function resource limits (MACs, vlans, queue counts, if-caps)
 * from the chip generation, multi-channel mode and SR-IOV state.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable, non-SRIOV physical function;
	 * max_rss_qs is otherwise left as-is (one extra RX queue is always
	 * accounted for in max_rx_qs)
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3611
Sathya Perla30128032011-11-10 19:17:57 +00003612static void be_setup_init(struct be_adapter *adapter)
3613{
3614 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003615 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003616 adapter->if_handle = -1;
3617 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003618 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003619 if (be_physfn(adapter))
3620 adapter->cmd_privileges = MAX_PRIVILEGES;
3621 else
3622 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003623}
3624
/* Read the SR-IOV capability/profile from FW, cache the PF-pool
 * resources, and validate the num_vfs module parameter against the
 * max supported and any VFs that are already enabled.
 * Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled take precedence over the param */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3669
/* Populate adapter->res with the per-function resource limits: derived
 * heuristically for BE2/BE3, queried from FW for later chips. Logs the
 * resulting limits at info level.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3706
/* Query the SR-IOV configuration (also validating the num_vfs module
 * param) and, when VFs are not yet enabled, redistribute the PF-pool
 * resources across the requested number of VFs.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3735
/* Query FW configuration: fw-cfg, port name, active profile (PF only),
 * SR-IOV config (non-BE2 PFs), and the per-function resource limits.
 * Allocates the pmac-id table sized by the max-uc-mac limit and clamps
 * cfg_num_qs to the discovered queue limits.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3771
/* Establish the primary MAC address: if the netdev has no address yet,
 * query the permanent MAC from FW and adopt it; otherwise re-program
 * the existing netdev address into FW.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3795
/* Arm the periodic (1 second) worker and record it in the flags so
 * be_cancel_worker() knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3801
/* Create all queues used by the driver (event queues, TX queues, RX CQs,
 * MCC queues) and publish the real RX/TX queue counts to the stack.
 * Caller must hold rtnl_lock for the netif_set_real_num_*_queues() calls
 * (see the comment at the call site in be_setup()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3836
/* Re-create all queues (e.g. when queue counts change): close the device
 * if it is running, stop the worker, tear down the queues, re-program
 * MSI-x (only possible when no vectors are shared with RoCE), set the
 * queues up again, restart the worker and re-open the device.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3872
/* Parse the major number out of a FW version string such as "4.2.220.0".
 * Returns 0 when the string does not begin with a decimal number.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
3883
/* Full adapter bring-up: query FW config and resource limits, enable
 * MSI-x, create the FW interface and all queues, program the MAC,
 * configure vlans/rx-mode/flow-control, set up VFs if requested, and
 * start the periodic worker. On any failure everything is torn down
 * via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Old BE2 firmware (major < 4) has known IRQ problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* If FW rejects the requested flow-control settings, read back
	 * what it actually uses so the reported values are accurate.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3963
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: emulate an interrupt by notifying each event queue and
 * scheduling its NAPI context, so completions are processed even when
 * normal interrupt delivery is not available.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3977
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303978static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003979
Sathya Perla306f1342011-08-02 19:57:45 +00003980static bool phy_flashing_required(struct be_adapter *adapter)
3981{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05003982 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003983 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003984}
3985
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003986static bool is_comp_in_ufi(struct be_adapter *adapter,
3987 struct flash_section_info *fsec, int type)
3988{
3989 int i = 0, img_type = 0;
3990 struct flash_section_info_g2 *fsec_g2 = NULL;
3991
Sathya Perlaca34fe32012-11-06 17:48:56 +00003992 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003993 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3994
3995 for (i = 0; i < MAX_FLASH_COMP; i++) {
3996 if (fsec_g2)
3997 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3998 else
3999 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4000
4001 if (img_type == type)
4002 return true;
4003 }
4004 return false;
4005
4006}
4007
Jingoo Han4188e7d2013-08-05 18:02:02 +09004008static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304009 int header_size,
4010 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004011{
4012 struct flash_section_info *fsec = NULL;
4013 const u8 *p = fw->data;
4014
4015 p += header_size;
4016 while (p < (fw->data + fw->size)) {
4017 fsec = (struct flash_section_info *)p;
4018 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4019 return fsec;
4020 p += 32;
4021 }
4022 return NULL;
4023}
4024
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304025static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4026 u32 img_offset, u32 img_size, int hdr_size,
4027 u16 img_optype, bool *crc_match)
4028{
4029 u32 crc_offset;
4030 int status;
4031 u8 crc[4];
4032
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004033 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4034 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304035 if (status)
4036 return status;
4037
4038 crc_offset = hdr_size + img_offset + img_size - 4;
4039
4040 /* Skip flashing, if crc of flashed region matches */
4041 if (!memcmp(crc, p + crc_offset, 4))
4042 *crc_match = true;
4043 else
4044 *crc_match = false;
4045
4046 return status;
4047}
4048
/* Write @img (@img_size bytes) into the flash region selected by
 * @optype/@img_offset, splitting the transfer into FW commands of at most
 * 32KB each.  Intermediate chunks use a "save" operation that only stages
 * data; the final chunk uses a "flash" operation that commits the region.
 * PHY firmware uses its own pair of save/flash opcodes.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			/* Last chunk: commit everything staged so far */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			/* More to come: just stage this chunk */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		/* Some FW versions refuse PHY flashing entirely; treat
		 * that as a benign skip rather than a failure.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4089
/* For BE2, BE3 and BE3-R */
/* Flash every applicable component of a gen2/gen3 UFI image.  Each chip
 * generation has a fixed table of (flash offset, op-type, max size, image
 * type) entries; components absent from the image's section directory, or
 * not relevant for this board (NC-SI on old FW, PHY FW without a TN-8022),
 * are skipped.  Boot code (redboot) is additionally skipped when its CRC
 * already matches what is in flash.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* gen3 (BE3) flash layout: { start offset, op-type, max size, image type } */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* gen2 (BE2) flash layout: no NC-SI or PHY FW regions */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 images carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NC-SI FW needs controller FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			/* Boot code already up to date; skip it */
			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds-check the component against the file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4207
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304208static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4209{
4210 u32 img_type = le32_to_cpu(fsec_entry.type);
4211 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4212
4213 if (img_optype != 0xFFFF)
4214 return img_optype;
4215
4216 switch (img_type) {
4217 case IMAGE_FIRMWARE_iSCSI:
4218 img_optype = OPTYPE_ISCSI_ACTIVE;
4219 break;
4220 case IMAGE_BOOT_CODE:
4221 img_optype = OPTYPE_REDBOOT;
4222 break;
4223 case IMAGE_OPTION_ROM_ISCSI:
4224 img_optype = OPTYPE_BIOS;
4225 break;
4226 case IMAGE_OPTION_ROM_PXE:
4227 img_optype = OPTYPE_PXE_BIOS;
4228 break;
4229 case IMAGE_OPTION_ROM_FCoE:
4230 img_optype = OPTYPE_FCOE_BIOS;
4231 break;
4232 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4233 img_optype = OPTYPE_ISCSI_BACKUP;
4234 break;
4235 case IMAGE_NCSI:
4236 img_optype = OPTYPE_NCSI_FW;
4237 break;
4238 case IMAGE_FLASHISM_JUMPVECTOR:
4239 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4240 break;
4241 case IMAGE_FIRMWARE_PHY:
4242 img_optype = OPTYPE_SH_PHY_FW;
4243 break;
4244 case IMAGE_REDBOOT_DIR:
4245 img_optype = OPTYPE_REDBOOT_DIR;
4246 break;
4247 case IMAGE_REDBOOT_CONFIG:
4248 img_optype = OPTYPE_REDBOOT_CONFIG;
4249 break;
4250 case IMAGE_UFI_DIR:
4251 img_optype = OPTYPE_UFI_DIR;
4252 break;
4253 default:
4254 break;
4255 }
4256
4257 return img_optype;
4258}
4259
/* Flash all recognized sections of a Skyhawk UFI image.  Newer FW prefers
 * OFFSET-based flashing (OPTYPE_OFFSET_SPECIFIED); if the running FW
 * rejects that mechanism, the whole loop is restarted (retry_flash) with
 * legacy OPTYPE-based flashing.  CRC checks let unchanged sections be
 * skipped, except for legacy (old_fw_img) images which are always written.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* Unknown image type that could not be mapped; skip it */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Flash already holds identical content; skip this section */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds-check the section against the file size */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4370
/* Download firmware to a Lancer chip.  The image is streamed in 32KB
 * chunks through a DMA-coherent command buffer to the "/prg" flash object,
 * then committed with a zero-length write.  Depending on the FW's reported
 * change_status, the adapter is either reset here to activate the new FW
 * or the user is told a reboot is required.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW object writes operate on 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	/* Chunk payload lives right after the command header */
	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what FW actually accepted, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4455
/* UFI (flash image file) type identifiers, derived from the build string
 * and ASIC revision fields in the image header.
 */
#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11

/* Classify a UFI image by chip family/revision, or -1 when the header is
 * missing or the build string is not recognized.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return -1;
	}

	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
							      SH_UFI;
	case BLD_STR_UFI_TYPE_BE3:
		return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
							      BE3_UFI;
	case BLD_STR_UFI_TYPE_BE2:
		return BE2_UFI;
	default:
		return -1;
	}
}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004486
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004487/* Check if the flash image file is compatible with the adapter that
4488 * is being flashed.
4489 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004490 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004491 */
4492static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4493 struct flash_file_hdr_g3 *fhdr)
4494{
4495 int ufi_type = be_get_ufi_type(adapter, fhdr);
4496
4497 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004498 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004499 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004500 case SH_UFI:
4501 return (skyhawk_chip(adapter) &&
4502 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004503 case BE3R_UFI:
4504 return BE3_chip(adapter);
4505 case BE3_UFI:
4506 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4507 case BE2_UFI:
4508 return BE2_chip(adapter);
4509 default:
4510 return false;
4511 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004512}
4513
/* Download firmware to BE2/BE3/Skyhawk adapters.  Validates that the UFI
 * image matches this chip/revision, allocates a DMA-coherent flashrom
 * command buffer, then dispatches each image in the file to the
 * generation-specific flashing routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On gen3 chips only image-id 1 is flashed; BE2 images
		 * don't carry usable per-image ids
		 */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
4557
/* Entry point for firmware flashing (e.g. from ethtool).  Requires the
 * interface to be up, fetches the named image via request_firmware(), and
 * routes it to the Lancer- or BE/Skyhawk-specific download path.  On
 * success the cached FW version string is refreshed.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	/* NOTE: on request_firmware() failure fw is expected to be NULL,
	 * and release_firmware(NULL) is a no-op — confirm against the
	 * firmware loader API if changing this path.
	 */
	release_firmware(fw);
	return status;
}
4587
/* ndo_bridge_setlink: program the e-switch forwarding mode (VEB or VEPA)
 * from an IFLA_AF_SPEC/IFLA_BRIDGE_MODE netlink request.  Only meaningful
 * when SR-IOV is enabled.  Returns after handling the first bridge-mode
 * attribute found.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Guard against truncated attribute payload */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4634
/* ndo_bridge_getlink: report the current e-switch forwarding mode to the
 * stack.  BE-x and Lancer are hard-wired to VEB; other chips query the FW.
 * Errors are reported as "nothing to dump" (0), matching the stack's
 * expectations for this ndo.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}
4660
#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Only Skyhawk-class chips support VxLAN offloads */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* A second port while offloads are active: disable offloads but
	 * keep counting ports so removal bookkeeping stays consistent
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities only now that a port exists */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4724
4725static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4726 __be16 port)
4727{
4728 struct be_adapter *adapter = netdev_priv(netdev);
4729
4730 if (lancer_chip(adapter) || BEx_chip(adapter))
4731 return;
4732
4733 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004734 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304735
4736 be_disable_vxlan_offloads(adapter);
4737
4738 dev_info(&adapter->pdev->dev,
4739 "Disabled VxLAN offloads for UDP port %d\n",
4740 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004741done:
4742 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304743}
Joe Stringer725d5482014-11-13 16:38:13 -08004744
Jesse Gross5f352272014-12-23 22:37:26 -08004745static netdev_features_t be_features_check(struct sk_buff *skb,
4746 struct net_device *dev,
4747 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004748{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304749 struct be_adapter *adapter = netdev_priv(dev);
4750 u8 l4_hdr = 0;
4751
4752 /* The code below restricts offload features for some tunneled packets.
4753 * Offload features for normal (non tunnel) packets are unchanged.
4754 */
4755 if (!skb->encapsulation ||
4756 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4757 return features;
4758
4759 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4760 * should disable tunnel offload features if it's not a VxLAN packet,
4761 * as tunnel offloads have been enabled only for VxLAN. This is done to
4762 * allow other tunneled traffic like GRE work fine while VxLAN
4763 * offloads are configured in Skyhawk-R.
4764 */
4765 switch (vlan_get_protocol(skb)) {
4766 case htons(ETH_P_IP):
4767 l4_hdr = ip_hdr(skb)->protocol;
4768 break;
4769 case htons(ETH_P_IPV6):
4770 l4_hdr = ipv6_hdr(skb)->nexthdr;
4771 break;
4772 default:
4773 return features;
4774 }
4775
4776 if (l4_hdr != IPPROTO_UDP ||
4777 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4778 skb->inner_protocol != htons(ETH_P_TEB) ||
4779 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4780 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4781 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4782
4783 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08004784}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304785#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304786
stephen hemmingere5686ad2012-01-05 19:10:25 +00004787static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004788 .ndo_open = be_open,
4789 .ndo_stop = be_close,
4790 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004791 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004792 .ndo_set_mac_address = be_mac_addr_set,
4793 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004794 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004795 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004796 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4797 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004798 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004799 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004800 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004801 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304802 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004803#ifdef CONFIG_NET_POLL_CONTROLLER
4804 .ndo_poll_controller = be_netpoll,
4805#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004806 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4807 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304808#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304809 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304810#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304811#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304812 .ndo_add_vxlan_port = be_add_vxlan_port,
4813 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08004814 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304815#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004816};
4817
4818static void be_netdev_init(struct net_device *netdev)
4819{
4820 struct be_adapter *adapter = netdev_priv(netdev);
4821
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004822 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004823 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004824 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004825 if (be_multi_rxq(adapter))
4826 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004827
4828 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004829 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004830
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004831 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004832 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004833
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004834 netdev->priv_flags |= IFF_UNICAST_FLT;
4835
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004836 netdev->flags |= IFF_MULTICAST;
4837
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004838 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004839
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004840 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004841
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004842 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004843}
4844
4845static void be_unmap_pci_bars(struct be_adapter *adapter)
4846{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004847 if (adapter->csr)
4848 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004849 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004850 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004851}
4852
Sathya Perlace66f782012-11-06 17:48:58 +00004853static int db_bar(struct be_adapter *adapter)
4854{
4855 if (lancer_chip(adapter) || !be_physfn(adapter))
4856 return 0;
4857 else
4858 return 4;
4859}
4860
4861static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004862{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004863 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004864 adapter->roce_db.size = 4096;
4865 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4866 db_bar(adapter));
4867 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4868 db_bar(adapter));
4869 }
Parav Pandit045508a2012-03-26 14:27:13 +00004870 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004871}
4872
4873static int be_map_pci_bars(struct be_adapter *adapter)
4874{
Suresh Reddy25848c92015-03-20 06:28:25 -04004875 struct pci_dev *pdev = adapter->pdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004876 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004877
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004878 if (BEx_chip(adapter) && be_physfn(adapter)) {
Suresh Reddy25848c92015-03-20 06:28:25 -04004879 adapter->csr = pci_iomap(pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304880 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004881 return -ENOMEM;
4882 }
4883
Suresh Reddy25848c92015-03-20 06:28:25 -04004884 addr = pci_iomap(pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304885 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004886 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004887 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004888
Suresh Reddy25848c92015-03-20 06:28:25 -04004889 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
4890 if (be_physfn(adapter)) {
4891 /* PCICFG is the 2nd BAR in BE2 */
4892 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
4893 if (!addr)
4894 goto pci_map_err;
4895 adapter->pcicfg = addr;
4896 } else {
4897 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
4898 }
4899 }
4900
Sathya Perlace66f782012-11-06 17:48:58 +00004901 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004902 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004903
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004904pci_map_err:
Suresh Reddy25848c92015-03-20 06:28:25 -04004905 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004906 be_unmap_pci_bars(adapter);
4907 return -ENOMEM;
4908}
4909
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004910static void be_ctrl_cleanup(struct be_adapter *adapter)
4911{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004912 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004913
4914 be_unmap_pci_bars(adapter);
4915
4916 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004917 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4918 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004919
Sathya Perla5b8821b2011-08-02 19:57:44 +00004920 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004921 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004922 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4923 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004924}
4925
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004926static int be_ctrl_init(struct be_adapter *adapter)
4927{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004928 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4929 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004930 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004931 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004932 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004933
Sathya Perlace66f782012-11-06 17:48:58 +00004934 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4935 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4936 SLI_INTF_FAMILY_SHIFT;
4937 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4938
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004939 status = be_map_pci_bars(adapter);
4940 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004941 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004942
4943 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004944 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4945 mbox_mem_alloc->size,
4946 &mbox_mem_alloc->dma,
4947 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004948 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004949 status = -ENOMEM;
4950 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004951 }
4952 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4953 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4954 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4955 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004956
Sathya Perla5b8821b2011-08-02 19:57:44 +00004957 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa2013-08-26 22:45:23 -07004958 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4959 rx_filter->size, &rx_filter->dma,
4960 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304961 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004962 status = -ENOMEM;
4963 goto free_mbox;
4964 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004965
Ivan Vecera29849612010-12-14 05:43:19 +00004966 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004967 spin_lock_init(&adapter->mcc_lock);
4968 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004969
Suresh Reddy5eeff632014-01-06 13:02:24 +05304970 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004971 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004972 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004973
4974free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004975 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4976 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004977
4978unmap_pci_bars:
4979 be_unmap_pci_bars(adapter);
4980
4981done:
4982 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004983}
4984
4985static void be_stats_cleanup(struct be_adapter *adapter)
4986{
Sathya Perla3abcded2010-10-03 22:12:27 -07004987 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004988
4989 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004990 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4991 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004992}
4993
4994static int be_stats_init(struct be_adapter *adapter)
4995{
Sathya Perla3abcded2010-10-03 22:12:27 -07004996 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004997
Sathya Perlaca34fe32012-11-06 17:48:56 +00004998 if (lancer_chip(adapter))
4999 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5000 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00005001 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05005002 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00005003 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05005004 else
5005 /* ALL non-BE ASICs */
5006 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00005007
Joe Perchesede23fa2013-08-26 22:45:23 -07005008 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
5009 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05305010 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05305011 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005012 return 0;
5013}
5014
Bill Pemberton3bc6b062012-12-03 09:23:09 -05005015static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005016{
5017 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00005018
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005019 if (!adapter)
5020 return;
5021
Parav Pandit045508a2012-03-26 14:27:13 +00005022 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00005023 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00005024
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005025 cancel_delayed_work_sync(&adapter->func_recovery_work);
5026
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005027 unregister_netdev(adapter->netdev);
5028
Sathya Perla5fb379e2009-06-18 00:02:59 +00005029 be_clear(adapter);
5030
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005031 /* tell fw we're done with firing cmds */
5032 be_cmd_fw_clean(adapter);
5033
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005034 be_stats_cleanup(adapter);
5035
5036 be_ctrl_cleanup(adapter);
5037
Sathya Perlad6b6d982012-09-05 01:56:48 +00005038 pci_disable_pcie_error_reporting(pdev);
5039
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005040 pci_release_regions(pdev);
5041 pci_disable_device(pdev);
5042
5043 free_netdev(adapter->netdev);
5044}
5045
Sathya Perla39f1d942012-05-08 19:41:24 +00005046static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005047{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05305048 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00005049
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00005050 status = be_cmd_get_cntl_attributes(adapter);
5051 if (status)
5052 return status;
5053
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005054 /* Must be a power of 2 or else MODULO will BUG_ON */
5055 adapter->be_get_temp_freq = 64;
5056
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05305057 if (BEx_chip(adapter)) {
5058 level = be_cmd_get_fw_log_level(adapter);
5059 adapter->msg_enable =
5060 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
5061 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00005062
Sathya Perla92bf14a2013-08-27 16:57:32 +05305063 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00005064 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005065}
5066
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005067static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005068{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005069 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005070 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005071
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005072 status = lancer_test_and_set_rdy_state(adapter);
5073 if (status)
5074 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005075
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005076 if (netif_running(adapter->netdev))
5077 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005078
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005079 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005080
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005081 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005082
5083 status = be_setup(adapter);
5084 if (status)
5085 goto err;
5086
5087 if (netif_running(adapter->netdev)) {
5088 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005089 if (status)
5090 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005091 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005092
Somnath Kotur4bebb562013-12-05 12:07:55 +05305093 dev_err(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005094 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005095err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005096 if (status == -EAGAIN)
5097 dev_err(dev, "Waiting for resource provisioning\n");
5098 else
Somnath Kotur4bebb562013-12-05 12:07:55 +05305099 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005100
5101 return status;
5102}
5103
/* Delayed-work handler that polls for hardware errors once a second and,
 * on Lancer chips, detaches the netdev and runs lancer_recover_func().
 * Reschedules itself unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl so the stack stops using the device
		 * while recovery runs.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
5129
Vasundhara Volam21252372015-02-06 08:18:42 -05005130static void be_log_sfp_info(struct be_adapter *adapter)
5131{
5132 int status;
5133
5134 status = be_cmd_query_sfp_info(adapter);
5135 if (!status) {
5136 dev_err(&adapter->pdev->dev,
5137 "Unqualified SFP+ detected on %c from %s part no: %s",
5138 adapter->port_name, adapter->phy.vendor_name,
5139 adapter->phy.vendor_pn);
5140 }
5141 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5142}
5143
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005144static void be_worker(struct work_struct *work)
5145{
5146 struct be_adapter *adapter =
5147 container_of(work, struct be_adapter, work.work);
5148 struct be_rx_obj *rxo;
5149 int i;
5150
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005151 /* when interrupts are not yet enabled, just reap any pending
5152 * mcc completions */
5153 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005154 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005155 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005156 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005157 goto reschedule;
5158 }
5159
5160 if (!adapter->stats_cmd_sent) {
5161 if (lancer_chip(adapter))
5162 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305163 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005164 else
5165 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5166 }
5167
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305168 if (be_physfn(adapter) &&
5169 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005170 be_cmd_get_die_temperature(adapter);
5171
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005172 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305173 /* Replenish RX-queues starved due to memory
5174 * allocation failures.
5175 */
5176 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305177 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005178 }
5179
Sathya Perla2632baf2013-10-01 16:00:00 +05305180 be_eqd_update(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005181
Vasundhara Volam21252372015-02-06 08:18:42 -05005182 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5183 be_log_sfp_info(adapter);
5184
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005185reschedule:
5186 adapter->work_counter++;
5187 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5188}
5189
Sathya Perla257a3fe2013-06-14 15:54:51 +05305190/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00005191static bool be_reset_required(struct be_adapter *adapter)
5192{
Sathya Perla257a3fe2013-06-14 15:54:51 +05305193 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00005194}
5195
Sathya Perlad3791422012-09-28 04:39:44 +00005196static char *mc_name(struct be_adapter *adapter)
5197{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305198 char *str = ""; /* default */
5199
5200 switch (adapter->mc_type) {
5201 case UMC:
5202 str = "UMC";
5203 break;
5204 case FLEX10:
5205 str = "FLEX10";
5206 break;
5207 case vNIC1:
5208 str = "vNIC-1";
5209 break;
5210 case nPAR:
5211 str = "nPAR";
5212 break;
5213 case UFP:
5214 str = "UFP";
5215 break;
5216 case vNIC2:
5217 str = "vNIC-2";
5218 break;
5219 default:
5220 str = "";
5221 }
5222
5223 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005224}
5225
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5230
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005231static inline char *nic_name(struct pci_dev *pdev)
5232{
5233 switch (pdev->device) {
5234 case OC_DEVICE_ID1:
5235 return OC_NAME;
5236 case OC_DEVICE_ID2:
5237 return OC_NAME_BE;
5238 case OC_DEVICE_ID3:
5239 case OC_DEVICE_ID4:
5240 return OC_NAME_LANCER;
5241 case BE_DEVICE_ID2:
5242 return BE3_NAME;
5243 case OC_DEVICE_ID5:
5244 case OC_DEVICE_ID6:
5245 return OC_NAME_SH;
5246 default:
5247 return BE_NAME;
5248 }
5249}
5250
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005251static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005252{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005253 struct be_adapter *adapter;
5254 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005255 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005256
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305257 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5258
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005259 status = pci_enable_device(pdev);
5260 if (status)
5261 goto do_none;
5262
5263 status = pci_request_regions(pdev, DRV_NAME);
5264 if (status)
5265 goto disable_dev;
5266 pci_set_master(pdev);
5267
Sathya Perla7f640062012-06-05 19:37:20 +00005268 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305269 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005270 status = -ENOMEM;
5271 goto rel_reg;
5272 }
5273 adapter = netdev_priv(netdev);
5274 adapter->pdev = pdev;
5275 pci_set_drvdata(pdev, adapter);
5276 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005277 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005278
Russell King4c15c242013-06-26 23:49:11 +01005279 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005280 if (!status) {
5281 netdev->features |= NETIF_F_HIGHDMA;
5282 } else {
Russell King4c15c242013-06-26 23:49:11 +01005283 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005284 if (status) {
5285 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5286 goto free_netdev;
5287 }
5288 }
5289
Kalesh AP2f951a92014-09-12 17:39:21 +05305290 status = pci_enable_pcie_error_reporting(pdev);
5291 if (!status)
5292 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005293
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005294 status = be_ctrl_init(adapter);
5295 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005296 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005297
Sathya Perla2243e2e2009-11-22 22:02:03 +00005298 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00005299 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005300 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00005301 if (status)
5302 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00005303 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00005304
Sathya Perla39f1d942012-05-08 19:41:24 +00005305 if (be_reset_required(adapter)) {
5306 status = be_cmd_reset_function(adapter);
5307 if (status)
5308 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07005309
Kalesh AP2d177be2013-04-28 22:22:29 +00005310 /* Wait for interrupts to quiesce after an FLR */
5311 msleep(100);
5312 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00005313
5314 /* Allow interrupts for other ULPs running on NIC function */
5315 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005316
Kalesh AP2d177be2013-04-28 22:22:29 +00005317 /* tell fw we're ready to fire cmds */
5318 status = be_cmd_fw_init(adapter);
5319 if (status)
5320 goto ctrl_clean;
5321
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005322 status = be_stats_init(adapter);
5323 if (status)
5324 goto ctrl_clean;
5325
Sathya Perla39f1d942012-05-08 19:41:24 +00005326 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005327 if (status)
5328 goto stats_clean;
5329
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005330 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005331 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Kalesh AP5f820b62014-09-19 15:47:01 +05305332 adapter->rx_fc = true;
5333 adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005334
Sathya Perla5fb379e2009-06-18 00:02:59 +00005335 status = be_setup(adapter);
5336 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00005337 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005338
Sathya Perla3abcded2010-10-03 22:12:27 -07005339 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005340 status = register_netdev(netdev);
5341 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005342 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005343
Parav Pandit045508a2012-03-26 14:27:13 +00005344 be_roce_dev_add(adapter);
5345
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005346 schedule_delayed_work(&adapter->func_recovery_work,
5347 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005348
Sathya Perlad3791422012-09-28 04:39:44 +00005349 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005350 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005351
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005352 return 0;
5353
Sathya Perla5fb379e2009-06-18 00:02:59 +00005354unsetup:
5355 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005356stats_clean:
5357 be_stats_cleanup(adapter);
5358ctrl_clean:
5359 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005360free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005361 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005362rel_reg:
5363 pci_release_regions(pdev);
5364disable_dev:
5365 pci_disable_device(pdev);
5366do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005367 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005368 return status;
5369}
5370
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, stop the
 * recovery worker, close and detach the netdev, tear down queues, then
 * save PCI state and power the device down. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5395
5396static int be_resume(struct pci_dev *pdev)
5397{
5398 int status = 0;
5399 struct be_adapter *adapter = pci_get_drvdata(pdev);
5400 struct net_device *netdev = adapter->netdev;
5401
5402 netif_device_detach(netdev);
5403
5404 status = pci_enable_device(pdev);
5405 if (status)
5406 return status;
5407
Yijing Wang1ca01512013-06-27 20:53:42 +08005408 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005409 pci_restore_state(pdev);
5410
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05305411 status = be_fw_wait_ready(adapter);
5412 if (status)
5413 return status;
5414
Kalesh AP9a6d73d2015-01-20 03:51:47 -05005415 status = be_cmd_reset_function(adapter);
5416 if (status)
5417 return status;
5418
Ajit Khaparded4360d62013-11-22 12:51:09 -06005419 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005420 /* tell fw we're ready to fire cmds */
5421 status = be_cmd_fw_init(adapter);
5422 if (status)
5423 return status;
5424
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00005425 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005426 if (netif_running(netdev)) {
5427 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005428 be_open(netdev);
5429 rtnl_unlock();
5430 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005431
5432 schedule_delayed_work(&adapter->func_recovery_work,
5433 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005434 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005435
Suresh Reddy76a9e082014-01-15 13:23:40 +05305436 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005437 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005438
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005439 return 0;
5440}
5441
/*
 * Shutdown/reboot hook. An FLR will stop BE from DMAing any data,
 * which is required so the device is quiet across kexec/reboot.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed and left no drvdata behind */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* stop workers before resetting the function underneath them */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* function-level reset stops all in-flight DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5462
/* EEH callback: the PCI channel reported an error. Quiesce the device
 * and tell the EEH core whether to reset the slot or give up.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once, even if this callback fires repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		/* recovery worker is restarted later in be_eeh_resume() */
		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	/* Permanent failure: no point attempting a slot reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5501
/* EEH callback: the slot has been reset. Re-enable the device and verify
 * the FW comes back before declaring the slot recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear stale AER status so future errors are reported anew */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);	/* leave the eeh_error state */
	return PCI_ERS_RESULT_RECOVERED;
}
5528
/* EEH callback: traffic may flow again. Re-initialize the function and
 * bring the interface back up; on any failure the netdev stays detached
 * (only a message is logged -- there is no further recovery here).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the worker cancelled in be_eeh_err_detected() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5571
/* PCI error-recovery (EEH) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5577
/* PCI driver descriptor: probe/remove, legacy PM, shutdown and EEH hooks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5588
5589static int __init be_init_module(void)
5590{
Joe Perches8e95a202009-12-03 07:58:21 +00005591 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5592 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005593 printk(KERN_WARNING DRV_NAME
5594 " : Module param rx_frag_size must be 2048/4096/8192."
5595 " Using 2048\n");
5596 rx_frag_size = 2048;
5597 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005598
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005599 return pci_register_driver(&be_driver);
5600}
5601module_init(be_init_module);
5602
/* Module exit point: unregister the PCI driver; the PCI core then calls
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);