blob: a6df4c96150fc4e7b6bc55d8b5891029df041937 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla11ac75e2011-12-13 00:58:50 +000037static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Benoit Taine9baa3c32014-08-08 15:56:03 +020041static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000053/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070054static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000055 "CEV",
56 "CTX",
57 "DBUF",
58 "ERX",
59 "Host",
60 "MPU",
61 "NDMA",
62 "PTC ",
63 "RDMA ",
64 "RXF ",
65 "RXIPS ",
66 "RXULP0 ",
67 "RXULP1 ",
68 "RXULP2 ",
69 "TIM ",
70 "TPOST ",
71 "TPRE ",
72 "TXIPS ",
73 "TXULP0 ",
74 "TXULP1 ",
75 "UC ",
76 "WDMA ",
77 "TXULP2 ",
78 "HOST1 ",
79 "P0_OB_LINK ",
80 "P1_OB_LINK ",
81 "HOST_GPIO ",
82 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053083 "ERX2 ",
84 "SPARE ",
85 "JTAG ",
86 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000087};
Kalesh APe2fb1af2014-09-19 15:46:58 +053088
Ajit Khaparde7c185272010-07-29 06:16:33 +000089/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070090static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000091 "LPCMEMHOST",
92 "MGMT_MAC",
93 "PCS0ONLINE",
94 "MPU_IRAM",
95 "PCS1ONLINE",
96 "PCTL0",
97 "PCTL1",
98 "PMEM",
99 "RR",
100 "TXPB",
101 "RXPP",
102 "XAUI",
103 "TXP",
104 "ARM",
105 "IPC",
106 "HOST2",
107 "HOST3",
108 "HOST4",
109 "HOST5",
110 "HOST6",
111 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530112 "ECRC",
113 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700114 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "PERIPH",
116 "LLTXULP",
117 "D2P",
118 "RCON",
119 "LDMA",
120 "LLTXP",
121 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000122 "Unknown"
123};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530128
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530190
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191 val |= qid & DB_RQ_RING_ID_MASK;
192 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000198static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
199 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700200{
201 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530202
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000203 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700204 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000205
206 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000207 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208}
209
Sathya Perla8788fdc2009-07-27 22:52:03 +0000210static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530211 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212{
213 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530214
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000217
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000218 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000219 return;
220
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700221 if (arm)
222 val |= 1 << DB_EQ_REARM_SHIFT;
223 if (clear_int)
224 val |= 1 << DB_EQ_CLR_SHIFT;
225 val |= 1 << DB_EQ_EVNT_SHIFT;
226 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228}
229
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231{
232 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530233
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530250 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700251 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530252 int status;
253 u8 mac[ETH_ALEN];
254 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530259 /* Proceed further only if, User provided MAC is different
260 * from active MAC
261 */
262 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
263 return 0;
264
Sathya Perla5a712c12013-07-23 15:24:59 +0530265 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
266 * privilege or if PF did not provision the new MAC address.
267 * On BE3, this cmd will always fail if the VF doesn't have the
268 * FILTMGMT privilege. This failure is OK, only if the PF programmed
269 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000270 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530271 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
272 adapter->if_handle, &adapter->pmac_id[0], 0);
273 if (!status) {
274 curr_pmac_id = adapter->pmac_id[0];
275
276 /* Delete the old programmed MAC. This call may fail if the
277 * old MAC was already deleted by the PF driver.
278 */
279 if (adapter->pmac_id[0] != old_pmac_id)
280 be_cmd_pmac_del(adapter, adapter->if_handle,
281 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000282 }
283
Sathya Perla5a712c12013-07-23 15:24:59 +0530284 /* Decide if the new MAC is successfully activated only after
285 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000286 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530287 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
288 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000289 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000290 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700291
Sathya Perla5a712c12013-07-23 15:24:59 +0530292 /* The MAC change did not happen, either due to lack of privilege
293 * or PF didn't pre-provision.
294 */
dingtianhong61d23e92013-12-30 15:40:43 +0800295 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530296 status = -EPERM;
297 goto err;
298 }
299
Somnath Koture3a7ae22011-10-27 07:14:05 +0000300 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530301 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000302 return 0;
303err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530304 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700305 return status;
306}
307
Sathya Perlaca34fe32012-11-06 17:48:56 +0000308/* BE2 supports only v0 cmd */
309static void *hw_stats_from_cmd(struct be_adapter *adapter)
310{
311 if (BE2_chip(adapter)) {
312 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000316 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500319 } else {
320 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
321
322 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323 }
324}
325
326/* BE2 supports only v0 cmd */
327static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
328{
329 if (BE2_chip(adapter)) {
330 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500337 } else {
338 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
339
340 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000341 }
342}
343
344static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000345{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
347 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
348 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000349 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000350 &rxf_stats->port[adapter->port_num];
351 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 drvs->rx_pause_frames = port_stats->rx_pause_frames;
355 drvs->rx_crc_errors = port_stats->rx_crc_errors;
356 drvs->rx_control_frames = port_stats->rx_control_frames;
357 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
358 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
359 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
360 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
361 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
362 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
363 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
364 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
365 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
366 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
367 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_dropped_header_too_small =
370 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000371 drvs->rx_address_filtered =
372 port_stats->rx_address_filtered +
373 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_alignment_symbol_errors =
375 port_stats->rx_alignment_symbol_errors;
376
377 drvs->tx_pauseframes = port_stats->tx_pauseframes;
378 drvs->tx_controlframes = port_stats->tx_controlframes;
379
380 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000381 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000382 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000383 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->forwarded_packets = rxf_stats->forwarded_packets;
387 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
389 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
391}
392
Sathya Perlaca34fe32012-11-06 17:48:56 +0000393static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000395 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
396 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
397 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000398 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000399 &rxf_stats->port[adapter->port_num];
400 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401
Sathya Perlaac124ff2011-07-25 19:10:14 +0000402 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000403 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
404 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 drvs->rx_pause_frames = port_stats->rx_pause_frames;
406 drvs->rx_crc_errors = port_stats->rx_crc_errors;
407 drvs->rx_control_frames = port_stats->rx_control_frames;
408 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
409 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
410 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
411 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
412 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
413 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
414 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
415 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
416 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
417 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
418 drvs->rx_dropped_header_too_small =
419 port_stats->rx_dropped_header_too_small;
420 drvs->rx_input_fifo_overflow_drop =
421 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000422 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000423 drvs->rx_alignment_symbol_errors =
424 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000425 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426 drvs->tx_pauseframes = port_stats->tx_pauseframes;
427 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000428 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000429 drvs->jabber_events = port_stats->jabber_events;
430 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->forwarded_packets = rxf_stats->forwarded_packets;
433 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000434 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
435 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
437}
438
Ajit Khaparde61000862013-10-03 16:16:33 -0500439static void populate_be_v2_stats(struct be_adapter *adapter)
440{
441 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
442 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
443 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
444 struct be_port_rxf_stats_v2 *port_stats =
445 &rxf_stats->port[adapter->port_num];
446 struct be_drv_stats *drvs = &adapter->drv_stats;
447
448 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
449 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
450 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
451 drvs->rx_pause_frames = port_stats->rx_pause_frames;
452 drvs->rx_crc_errors = port_stats->rx_crc_errors;
453 drvs->rx_control_frames = port_stats->rx_control_frames;
454 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
455 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
456 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
457 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
458 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
459 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
460 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
461 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
462 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
463 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
464 drvs->rx_dropped_header_too_small =
465 port_stats->rx_dropped_header_too_small;
466 drvs->rx_input_fifo_overflow_drop =
467 port_stats->rx_input_fifo_overflow_drop;
468 drvs->rx_address_filtered = port_stats->rx_address_filtered;
469 drvs->rx_alignment_symbol_errors =
470 port_stats->rx_alignment_symbol_errors;
471 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
472 drvs->tx_pauseframes = port_stats->tx_pauseframes;
473 drvs->tx_controlframes = port_stats->tx_controlframes;
474 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
475 drvs->jabber_events = port_stats->jabber_events;
476 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
477 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
478 drvs->forwarded_packets = rxf_stats->forwarded_packets;
479 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
480 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
481 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
482 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530483 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500484 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
485 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
486 drvs->rx_roce_frames = port_stats->roce_frames_received;
487 drvs->roce_drops_crc = port_stats->roce_drops_crc;
488 drvs->roce_drops_payload_len =
489 port_stats->roce_drops_payload_len;
490 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500491}
492
Selvin Xavier005d5692011-05-16 07:36:35 +0000493static void populate_lancer_stats(struct be_adapter *adapter)
494{
Selvin Xavier005d5692011-05-16 07:36:35 +0000495 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530496 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000497
498 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
499 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
500 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
501 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000502 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000503 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000504 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
505 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
506 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
507 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
508 drvs->rx_dropped_tcp_length =
509 pport_stats->rx_dropped_invalid_tcp_length;
510 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
511 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
512 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
513 drvs->rx_dropped_header_too_small =
514 pport_stats->rx_dropped_header_too_small;
515 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000516 drvs->rx_address_filtered =
517 pport_stats->rx_address_filtered +
518 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000519 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000520 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
522 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000523 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 drvs->forwarded_packets = pport_stats->num_forwards_lo;
525 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000526 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000527 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000528}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000529
Sathya Perla09c1c682011-08-22 19:41:53 +0000530static void accumulate_16bit_val(u32 *acc, u16 val)
531{
532#define lo(x) (x & 0xFFFF)
533#define hi(x) (x & 0xFFFF0000)
534 bool wrapped = val < lo(*acc);
535 u32 newacc = hi(*acc) + val;
536
537 if (wrapped)
538 newacc += 65536;
539 ACCESS_ONCE(*acc) = newacc;
540}
541
Jingoo Han4188e7d2013-08-05 18:02:02 +0900542static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530543 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000544{
545 if (!BEx_chip(adapter))
546 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
547 else
548 /* below erx HW counter can actually wrap around after
549 * 65535. Driver accumulates a 32-bit value
550 */
551 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
552 (u16)erx_stat);
553}
554
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000555void be_parse_stats(struct be_adapter *adapter)
556{
Ajit Khaparde61000862013-10-03 16:16:33 -0500557 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558 struct be_rx_obj *rxo;
559 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000560 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000561
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (lancer_chip(adapter)) {
563 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000564 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000565 if (BE2_chip(adapter))
566 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else if (BE3_chip(adapter))
568 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000569 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 else
571 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000572
Ajit Khaparde61000862013-10-03 16:16:33 -0500573 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000575 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
576 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000578 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000579}
580
Sathya Perlaab1594e2011-07-25 19:10:15 +0000581static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530582 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700583{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000584 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000585 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700586 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000587 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000588 u64 pkts, bytes;
589 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700590 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700591
Sathya Perla3abcded2010-10-03 22:12:27 -0700592 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000593 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530594
Sathya Perlaab1594e2011-07-25 19:10:15 +0000595 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700596 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000597 pkts = rx_stats(rxo)->rx_pkts;
598 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700599 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000600 stats->rx_packets += pkts;
601 stats->rx_bytes += bytes;
602 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
603 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
604 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700605 }
606
Sathya Perla3c8def92011-06-12 20:01:58 +0000607 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000608 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530609
Sathya Perlaab1594e2011-07-25 19:10:15 +0000610 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700611 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000612 pkts = tx_stats(txo)->tx_pkts;
613 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700614 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 stats->tx_packets += pkts;
616 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000617 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700618
619 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000620 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000621 drvs->rx_alignment_symbol_errors +
622 drvs->rx_in_range_errors +
623 drvs->rx_out_range_errors +
624 drvs->rx_frame_too_long +
625 drvs->rx_dropped_too_small +
626 drvs->rx_dropped_too_short +
627 drvs->rx_dropped_header_too_small +
628 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000629 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700630
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700631 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000632 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000633 drvs->rx_out_range_errors +
634 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000635
Sathya Perlaab1594e2011-07-25 19:10:15 +0000636 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700637
638 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000639 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000640
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641 /* receiver fifo overrun */
642 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000643 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000644 drvs->rx_input_fifo_overflow_drop +
645 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000646 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647}
648
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000649void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700650{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 struct net_device *netdev = adapter->netdev;
652
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000653 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000654 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000655 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000657
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530658 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000659 netif_carrier_on(netdev);
660 else
661 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662}
663
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500664static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla3c8def92011-06-12 20:01:58 +0000666 struct be_tx_stats *stats = tx_stats(txo);
667
Sathya Perlaab1594e2011-07-25 19:10:15 +0000668 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000669 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500670 stats->tx_bytes += skb->len;
671 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500675/* Returns number of WRBs needed for the skb */
676static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500678 /* +1 for the header wrb */
679 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
682static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
683{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500684 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
685 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
686 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
687 wrb->rsvd0 = 0;
688}
689
690/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
691 * to avoid the swap and shift/mask operations in wrb_fill().
692 */
693static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
694{
695 wrb->frag_pa_hi = 0;
696 wrb->frag_pa_lo = 0;
697 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000698 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700699}
700
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000701static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530702 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000703{
704 u8 vlan_prio;
705 u16 vlan_tag;
706
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100707 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
709 /* If vlan priority provided by OS is NOT in available bmap */
710 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
711 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
712 adapter->recommended_prio;
713
714 return vlan_tag;
715}
716
Sathya Perlac9c47142014-03-27 10:46:19 +0530717/* Used only for IP tunnel packets */
718static u16 skb_inner_ip_proto(struct sk_buff *skb)
719{
720 return (inner_ip_hdr(skb)->version == 4) ?
721 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
722}
723
724static u16 skb_ip_proto(struct sk_buff *skb)
725{
726 return (ip_hdr(skb)->version == 4) ?
727 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
728}
729
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530730static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
731 struct sk_buff *skb,
732 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700733{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530734 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700735
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000736 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530737 BE_WRB_F_SET(wrb_params->features, LSO, 1);
738 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000739 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530740 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700741 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530742 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530743 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530744 proto = skb_inner_ip_proto(skb);
745 } else {
746 proto = skb_ip_proto(skb);
747 }
748 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530749 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530750 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530751 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700752 }
753
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100754 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530755 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
756 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700757 }
758
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530759 BE_WRB_F_SET(wrb_params->features, CRC, 1);
760}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500761
/* Translate the previously collected wrb_params into the header WRB that
 * precedes the fragment WRBs of a TX request. The header carries the
 * offload flags, VLAN info, total WRB count and total frame length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* large-send offload bits and segment size */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* total WRBs (incl. this header) and total payload length */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}
796
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000797static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530798 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000799{
800 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500801 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000802
Sathya Perla7101e112010-03-22 20:41:12 +0000803
Sathya Perlaf986afc2015-02-06 08:18:43 -0500804 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
805 (u64)le32_to_cpu(wrb->frag_pa_lo);
806 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000807 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500808 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000809 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500810 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000811 }
812}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700813
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530814/* Grab a WRB header for xmit */
815static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700816{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530817 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700818
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530819 queue_head_inc(&txo->q);
820 return head;
821}
822
/* Set up the WRB header for xmit: fill the reserved header slot @head,
 * record the skb for completion processing and account the WRBs consumed
 * by this request on the queue.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW reads the header in little-endian double-words */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* slot must be free; a stale skb here means a completion bug */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;	/* not yet notified to HW */
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530844/* Setup a WRB fragment (buffer descriptor) for xmit */
845static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
846 int len)
847{
848 struct be_eth_wrb *wrb;
849 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530851 wrb = queue_head_node(txq);
852 wrb_fill(wrb, busaddr, len);
853 queue_head_inc(txq);
854}
855
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the header slot so queue_head_inc() walks the WRBs
	 * of this (failed) request in order
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first fragment was a dma_map_single() mapping */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index at the start of the failed request */
	txq->head = head;
}
883
884/* Enqueue the given packet for transmit. This routine allocates WRBs for the
885 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
886 * of WRBs used up by the packet.
887 */
888static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
889 struct sk_buff *skb,
890 struct be_wrb_params *wrb_params)
891{
892 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
893 struct device *dev = &adapter->pdev->dev;
894 struct be_queue_info *txq = &txo->q;
895 bool map_single = false;
896 u16 head = txq->head;
897 dma_addr_t busaddr;
898 int len;
899
900 head = be_tx_get_wrb_hdr(txo);
901
902 if (skb->len > skb->data_len) {
903 len = skb_headlen(skb);
904
905 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
906 if (dma_mapping_error(dev, busaddr))
907 goto dma_err;
908 map_single = true;
909 be_tx_setup_wrb_frag(txo, busaddr, len);
910 copied += len;
911 }
912
913 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
914 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
915 len = skb_frag_size(frag);
916
917 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
918 if (dma_mapping_error(dev, busaddr))
919 goto dma_err;
920 be_tx_setup_wrb_frag(txo, busaddr, len);
921 copied += len;
922 }
923
924 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
925
926 be_tx_stats_update(txo, skb);
927 return wrb_cnt;
928
929dma_err:
930 adapter->drv_stats.dma_map_errors++;
931 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000932 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700933}
934
/* Non-zero once the QnQ async event has been received from FW, i.e. the
 * FW-side workaround to skip HW VLAN insertion is available.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
939
/* Insert the VLAN tag(s) into the packet data in SW instead of letting the
 * HW tag it (works around HW VLAN-tagging bugs). Inserts the inner tag
 * (from skb metadata or pvid) and, in QnQ mode, the outer qnq_vid tag.
 * May reallocate the skb; returns the (possibly new) skb or NULL on
 * allocation failure, in which case the original skb has been consumed.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* get a private copy if the skb data is shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkt in QnQ mode: tag with the port VID */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the packet data; clear the metadata tag so
		 * HW does not insert it again
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
983
/* Returns true if the skb is an IPv6 packet whose first extension header
 * has hdrlen == 0xff -- the pattern that triggers the HW TX-stall errata.
 * NOTE(review): parses skb->data directly; assumes the Ethernet + IPv6 +
 * first extension headers are in the linear area (typical on TX) -- confirm.
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		/* only look at extension headers, not plain TCP/UDP */
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
1005
1006static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1007{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001008 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001009}
1010
Sathya Perla748b5392014-05-09 13:29:13 +05301011static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001012{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001013 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001014}
1015
/* Apply BEx/Lancer TX errata workarounds to the skb: trim HW-padded runt
 * IPv4 frames, force SW VLAN insertion where HW tagging is buggy, and drop
 * packets that would lock up the ASIC. Returns the (possibly re-allocated)
 * skb, or NULL if the packet was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * So trim the skb back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1084
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301085static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1086 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301087 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301088{
1089 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1090 * less may cause a transmit stall on that port. So the work-around is
1091 * to pad short packets (<= 32 bytes) to a 36-byte length.
1092 */
1093 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001094 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301095 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301096 }
1097
1098 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301099 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301100 if (!skb)
1101 return NULL;
1102 }
1103
1104 return skb;
1105}
1106
/* Notify HW of all WRBs queued since the last flush by ringing the TX
 * doorbell. Ensures the last request generates a completion event, and on
 * non-Lancer chips pads the batch with a dummy WRB so an even number of
 * WRBs is posted.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* fold the dummy WRB into the last request's num_wrb count */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1130
/* ndo_start_xmit handler: apply HW workarounds, build WRBs for the skb and
 * ring the doorbell (batched via xmit_more). Always returns NETDEV_TX_OK;
 * undeliverable packets are freed and counted as driver drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	struct be_queue_info *txq = &txo->q;
	bool flush = !skb->xmit_more;	/* no more pkts pending: ring DB now */
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;	/* workaround path consumed the skb */

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; queue state already rolled back */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the queue before it can no longer hold a max-fragment pkt */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1170
1171static int be_change_mtu(struct net_device *netdev, int new_mtu)
1172{
1173 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301174 struct device *dev = &adapter->pdev->dev;
1175
1176 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1177 dev_info(dev, "MTU must be between %d and %d bytes\n",
1178 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001179 return -EINVAL;
1180 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301181
1182 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301183 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184 netdev->mtu = new_mtu;
1185 return 0;
1186}
1187
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001188static inline bool be_in_all_promisc(struct be_adapter *adapter)
1189{
1190 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1191 BE_IF_FLAGS_ALL_PROMISCUOUS;
1192}
1193
1194static int be_set_vlan_promisc(struct be_adapter *adapter)
1195{
1196 struct device *dev = &adapter->pdev->dev;
1197 int status;
1198
1199 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1200 return 0;
1201
1202 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1203 if (!status) {
1204 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1205 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1206 } else {
1207 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1208 }
1209 return status;
1210}
1211
1212static int be_clear_vlan_promisc(struct be_adapter *adapter)
1213{
1214 struct device *dev = &adapter->pdev->dev;
1215 int status;
1216
1217 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1218 if (!status) {
1219 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1220 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1221 }
1222 return status;
1223}
1224
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* too many VIDs for the HW filter: fall back to VLAN promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* filtering now works: drop the promisc fallback */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1259
/* ndo_vlan_rx_add_vid handler: record @vid in the driver's VID bitmap and
 * push the updated VLAN table to the HW; rolls the SW state back if the
 * HW update fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* already configured: nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* undo the SW bookkeeping on HW failure */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1283
Patrick McHardy80d5c362013-04-19 02:04:28 +00001284static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001285{
1286 struct be_adapter *adapter = netdev_priv(netdev);
1287
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001288 /* Packets with VID 0 are always received by Lancer by default */
1289 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301290 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001291
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301292 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301293 adapter->vlans_added--;
1294
1295 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001296}
1297
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001298static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301299{
Sathya Perlaac34b742015-02-06 08:18:40 -05001300 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001301 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1302}
1303
1304static void be_set_all_promisc(struct be_adapter *adapter)
1305{
1306 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1307 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1308}
1309
1310static void be_set_mc_promisc(struct be_adapter *adapter)
1311{
1312 int status;
1313
1314 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1315 return;
1316
1317 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1318 if (!status)
1319 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1320}
1321
1322static void be_set_mc_list(struct be_adapter *adapter)
1323{
1324 int status;
1325
1326 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1327 if (!status)
1328 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1329 else
1330 be_set_mc_promisc(adapter);
1331}
1332
/* Re-programs the netdev's unicast address list into the interface's
 * pmac table: all previously added secondary entries are deleted first,
 * then the current list is re-added.  If the list exceeds the HW pmac
 * capacity, the interface falls back to full promiscuous mode instead.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete every previously programmed secondary MAC entry */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	/* More UC addresses than pmac slots: go fully promiscuous */
	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1353
1354static void be_clear_uc_list(struct be_adapter *adapter)
1355{
1356 int i;
1357
1358 for (i = 1; i < (adapter->uc_macs + 1); i++)
1359 be_cmd_pmac_del(adapter, adapter->if_handle,
1360 adapter->pmac_id[i], 0);
1361 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301362}
1363
/* ndo_set_rx_mode() handler: syncs the HW RX filters (promiscuous mode,
 * VLAN filters, multicast and unicast lists) with the netdev flags and
 * address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Restore the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program the UC list only when its size has changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1392
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001393static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1394{
1395 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001396 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001397 int status;
1398
Sathya Perla11ac75e2011-12-13 00:58:50 +00001399 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001400 return -EPERM;
1401
Sathya Perla11ac75e2011-12-13 00:58:50 +00001402 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001403 return -EINVAL;
1404
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301405 /* Proceed further only if user provided MAC is different
1406 * from active MAC
1407 */
1408 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1409 return 0;
1410
Sathya Perla3175d8c2013-07-23 15:25:03 +05301411 if (BEx_chip(adapter)) {
1412 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1413 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001414
Sathya Perla11ac75e2011-12-13 00:58:50 +00001415 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1416 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301417 } else {
1418 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1419 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001420 }
1421
Kalesh APabccf232014-07-17 16:20:24 +05301422 if (status) {
1423 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1424 mac, vf, status);
1425 return be_cmd_status(status);
1426 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001427
Kalesh APabccf232014-07-17 16:20:24 +05301428 ether_addr_copy(vf_cfg->mac_addr, mac);
1429
1430 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001431}
1432
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001433static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301434 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001435{
1436 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001437 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001438
Sathya Perla11ac75e2011-12-13 00:58:50 +00001439 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001440 return -EPERM;
1441
Sathya Perla11ac75e2011-12-13 00:58:50 +00001442 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001443 return -EINVAL;
1444
1445 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001446 vi->max_tx_rate = vf_cfg->tx_rate;
1447 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001448 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1449 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001450 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301451 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001452
1453 return 0;
1454}
1455
Sathya Perla748b5392014-05-09 13:29:13 +05301456static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001457{
1458 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001459 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001460 int status = 0;
1461
Sathya Perla11ac75e2011-12-13 00:58:50 +00001462 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001463 return -EPERM;
1464
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001465 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001466 return -EINVAL;
1467
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001468 if (vlan || qos) {
1469 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301470 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001471 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1472 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001473 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001474 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301475 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1476 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001477 }
1478
Kalesh APabccf232014-07-17 16:20:24 +05301479 if (status) {
1480 dev_err(&adapter->pdev->dev,
1481 "VLAN %d config on VF %d failed : %#x\n", vlan,
1482 vf, status);
1483 return be_cmd_status(status);
1484 }
1485
1486 vf_cfg->vlan_tag = vlan;
1487
1488 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001489}
1490
/* ndo_set_vf_rate() handler: programs a TX rate limit (Mbps) for VF @vf.
 * A minimum rate is not supported by this HW; max_tx_rate == 0 removes
 * any existing limit.  Returns 0 on success or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* HW cannot guarantee a minimum bandwidth */
	if (min_tx_rate)
		return -EINVAL;

	/* Zero clears the limit; no validation against link speed needed */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	/* Ensures percent_rate below is >= 1, so no division by zero */
	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the accepted rate for be_get_vf_config() */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301552
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301553static int be_set_vf_link_state(struct net_device *netdev, int vf,
1554 int link_state)
1555{
1556 struct be_adapter *adapter = netdev_priv(netdev);
1557 int status;
1558
1559 if (!sriov_enabled(adapter))
1560 return -EPERM;
1561
1562 if (vf >= adapter->num_vfs)
1563 return -EINVAL;
1564
1565 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301566 if (status) {
1567 dev_err(&adapter->pdev->dev,
1568 "Link state change on VF %d failed: %#x\n", vf, status);
1569 return be_cmd_status(status);
1570 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301571
Kalesh APabccf232014-07-17 16:20:24 +05301572 adapter->vf_cfg[vf].plink_tracking = link_state;
1573
1574 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301575}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001576
Sathya Perla2632baf2013-10-01 16:00:00 +05301577static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1578 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579{
Sathya Perla2632baf2013-10-01 16:00:00 +05301580 aic->rx_pkts_prev = rx_pkts;
1581 aic->tx_reqs_prev = tx_pkts;
1582 aic->jiffies = now;
1583}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001584
/* Adaptive interrupt coalescing: recomputes the event-queue delay for
 * every EQ from the rx/tx packet rate seen since the previous run, and
 * issues a single FW command covering all EQs whose delay changed.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: use the statically configured delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Snapshot rx/tx counters under the u64_stats seq retry
		 * loop so 64-bit reads are consistent on 32-bit hosts.
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second mapped to a delay,
		 * clamped to the configured [min_eqd, max_eqd] window.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Batch only the EQs whose delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1650
Sathya Perla3abcded2010-10-03 22:12:27 -07001651static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301652 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001653{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001654 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001655
Sathya Perlaab1594e2011-07-25 19:10:15 +00001656 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001657 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001658 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001659 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001660 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001661 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001662 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001663 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001664 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665}
1666
Sathya Perla2e588f82011-03-11 02:49:26 +00001667static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001668{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001669 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301670 * Also ignore ipcksm for ipv6 pkts
1671 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001672 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301673 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001674}
1675
/* Pops the page-info entry at the RX queue tail and makes its data
 * visible to the CPU.  A page is DMA-mapped once for all frags carved
 * out of it: only the entry flagged 'last_frag' performs the full
 * unmap; earlier frags just need a CPU sync of their region.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the queue entry */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1701
1702/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001703static void be_rx_compl_discard(struct be_rx_obj *rxo,
1704 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001705{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001706 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001707 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001709 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301710 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001711 put_page(page_info->page);
1712 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713 }
1714}
1715
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  Tiny packets are copied entirely into the linear
 * area; larger packets keep their payload in page fragments, with frags
 * from the same physical page coalesced into one skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first frag stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag packet: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: it already holds
			 * a reference via frag[j], so drop this extra one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1790
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and reclaim the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Only trust the HW checksum result for TCP/UDP over valid L3 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	/* Hand the stripped VLAN tag back to the stack */
	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1826
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: reclaim the frags of this completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach every frag of the completion to the GRO skb */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: frag[j] already holds a reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	/* Hand the stripped VLAN tag back to the stack */
	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1884
/* Decode a version-1 RX completion descriptor into the HW-independent
 * be_rx_compl_info form.  QnQ and VLAN-tag fields are only valid when
 * the vlanf bit is set; v1 additionally reports a 'tunneled' flag.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907
/* Parse a v0 (legacy) Rx completion descriptor into the adapter's
 * version-independent be_rx_compl_info.
 * @compl: Rx completion entry, already byte-swapped to CPU endianness
 * @rxcp:  parsed output (lives in the rx_obj, reused per completion)
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		/* vlan fields are read only when the vtp bit is set */
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	/* ip_frag bit exists only in the v0 compl */
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1929
/* Fetch the next valid Rx completion from @rxo's CQ, or NULL if none.
 * The entry is parsed (v0 or v1 format based on be3_native) into
 * rxo->rxcp, its valid bit is cleared for HW reuse, and the CQ tail is
 * advanced. The returned pointer is to rxo->rxcp and is overwritten by
 * the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the transparent/PVID tag from the stack unless the
		 * vlan-id was explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1974
Eric Dumazet1829b082011-03-01 05:48:12 +00001975static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001976{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001978
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001980 gfp |= __GFP_COMP;
1981 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001982}
1983
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. Posts up to @frags_needed frags, stopping early
 * if allocation/mapping fails or the RXQ slot is still occupied.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* A non-NULL page in the slot at rxq->head means the ring is full */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page": allocate and DMA-map it
			 * once; subsequent frags reuse the same mapping
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * take a ref so each frag owns the page independently
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Write the frag's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* last frag stores the page's DMA addr for unmapping */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the RXQ doorbell in chunks of at most 256 frags */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2066
/* Fetch the next valid Tx completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped to CPU endianness, its valid bit is cleared
 * for HW reuse, and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
2082
/* Unmap and free the wrbs/skbs of one or more completed TX requests,
 * walking the TXQ from its current tail up to and including @last_index.
 * A non-NULL entry in sent_skb_list marks the hdr wrb of a new request.
 * Returns the number of wrbs consumed (callers subtract it from
 * txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq); /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Unmap the header portion only for the first frag of a skb */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last processed request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2116
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002117/* Return the number of events in the event queue */
2118static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002119{
2120 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002121 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002122
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002123 do {
2124 eqe = queue_tail_node(&eqo->q);
2125 if (eqe->evt == 0)
2126 break;
2127
2128 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002129 eqe->evt = 0;
2130 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002131 queue_tail_inc(&eqo->q);
2132 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002133
2134 return num;
2135}
2136
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002137/* Leaves the EQ is disarmed state */
2138static void be_eq_clean(struct be_eq_obj *eqo)
2139{
2140 int num = events_get(eqo);
2141
2142 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2143}
2144
/* Drain @rxo's completion queue and free all posted rx buffers.
 * Used at queue-teardown time, after RX has been stopped in HW.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if HW is in error state */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	/* Reset ring indices for the next queue creation */
	rxq->tail = 0;
	rxq->head = 0;
}
2194
/* Drain TX completions for all TX queues at teardown time.
 * First, poll each TXQ's CQ until the HW has been silent for ~10ms
 * (or a HW error is detected), freeing completed wrbs/skbs. Then free
 * any requests that were enqueued but never notified to the HW, and
 * reset those TXQ indices so the rings are consistent again.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart the timeout */
				timeo = 0;
			}
			/* Only un-notified wrbs (if any) may remain */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2259
/* Destroy all event queues: drain pending events, destroy the HW EQ,
 * unregister the napi context, then free the queue memory. The HW EQ
 * is torn down before the napi context it served.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2275
/* Create the event queues, one per vector: register a napi context,
 * initialize adaptive interrupt-coalescing state, allocate the EQ
 * memory and create the EQ in HW.
 * Returns 0 on success or a negative error code (partially created
 * queues are cleaned up by the caller via be_evt_queues_destroy).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* EQ count is bounded by both available IRQs and configured qs */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* adaptive interrupt coalescing starts enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2308
Sathya Perla5fb379e2009-06-18 00:02:59 +00002309static void be_mcc_queues_destroy(struct be_adapter *adapter)
2310{
2311 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002312
Sathya Perla8788fdc2009-07-27 22:52:03 +00002313 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002314 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002315 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002316 be_queue_free(adapter, q);
2317
Sathya Perla8788fdc2009-07-27 22:52:03 +00002318 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002319 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002320 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002321 be_queue_free(adapter, q);
2322}
2323
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue first, then the MCC queue itself;
 * on failure unwinds in reverse order via the goto chain.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: reverse order of the creation steps above */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2356
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002357static void be_tx_queues_destroy(struct be_adapter *adapter)
2358{
2359 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002360 struct be_tx_obj *txo;
2361 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002362
Sathya Perla3c8def92011-06-12 20:01:58 +00002363 for_all_tx_queues(adapter, txo, i) {
2364 q = &txo->q;
2365 if (q->created)
2366 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2367 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002368
Sathya Perla3c8def92011-06-12 20:01:58 +00002369 q = &txo->cq;
2370 if (q->created)
2371 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2372 be_queue_free(adapter, q);
2373 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002374}
2375
/* Create the TX rings: for each ring, allocate and create a completion
 * queue (distributed round-robin over the EQs) and then the TX queue
 * itself. Returns 0 on success or a negative error code (partially
 * created queues are cleaned up by the caller).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2416
2417static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002418{
2419 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002420 struct be_rx_obj *rxo;
2421 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002422
Sathya Perla3abcded2010-10-03 22:12:27 -07002423 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002424 q = &rxo->cq;
2425 if (q->created)
2426 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2427 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002428 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002429}
2430
/* Decide the number of RX rings and create a completion queue for each,
 * distributing the CQs round-robin over the EQs.
 * Returns 0 on success or a negative error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2467
/* Legacy INTx interrupt handler: count pending EQ events, ack them to
 * HW (without re-arming) and schedule napi polling.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2499
/* MSI-x interrupt handler: each vector has its own EQ. Notify the EQ
 * (keeping it unarmed; napi re-arms when polling completes) and
 * schedule napi polling.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2508
Sathya Perla2e588f82011-03-11 02:49:26 +00002509static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002510{
Somnath Koture38b1702013-05-29 22:55:56 +00002511 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002512}
2513
/* Drain up to @budget rx completions from @rxo's completion queue.
 * @polling distinguishes NAPI_POLLING from BUSY_POLLING (GRO is skipped for
 * the latter). Returns the number of completions consumed. Runs in softirq
 * (or busy-poll) context; refills the rx queue with GFP_ATOMIC.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* even discarded completions consumed rx frags; account them
		 * so the refill below replaces what was used
		 */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2573
Kalesh AP512bb8a2014-09-02 09:56:49 +05302574static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2575{
2576 switch (status) {
2577 case BE_TX_COMP_HDR_PARSE_ERR:
2578 tx_stats(txo)->tx_hdr_parse_err++;
2579 break;
2580 case BE_TX_COMP_NDMA_ERR:
2581 tx_stats(txo)->tx_dma_err++;
2582 break;
2583 case BE_TX_COMP_ACL_ERR:
2584 tx_stats(txo)->tx_spoof_check_err++;
2585 break;
2586 }
2587}
2588
2589static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2590{
2591 switch (status) {
2592 case LANCER_TX_COMP_LSO_ERR:
2593 tx_stats(txo)->tx_tso_err++;
2594 break;
2595 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2596 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2597 tx_stats(txo)->tx_spoof_check_err++;
2598 break;
2599 case LANCER_TX_COMP_QINQ_ERR:
2600 tx_stats(txo)->tx_qinq_err++;
2601 break;
2602 case LANCER_TX_COMP_PARITY_ERR:
2603 tx_stats(txo)->tx_internal_parity_err++;
2604 break;
2605 case LANCER_TX_COMP_DMA_ERR:
2606 tx_stats(txo)->tx_dma_err++;
2607 break;
2608 }
2609}
2610
/* Reap all available TX completions for @txo (netdev tx queue index @idx):
 * free the transmitted skbs/wrbs, record per-chip error stats, notify the CQ
 * and wake the subqueue if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			/* error accounting differs between Lancer and
			 * BE-family chips
			 */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002649
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll arbitration helpers: eqo->state serializes NAPI softirq
 * processing against be_busy_poll() running in process context. Exactly one
 * of them may own the EQ's rx queues at a time; the loser records a *_YIELD
 * flag (statistics/ordering hint) and backs off.
 */

/* Try to take the EQ for NAPI processing; returns false if busy-polling
 * currently owns it. Caller runs in softirq context (BH already off).
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership taken by be_lock_napi() */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take the EQ for busy-polling (process context, so BH-safe lock);
 * returns false if NAPI currently owns it.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership taken by be_lock_busy_poll() */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* (Re)initialize the per-EQ busy-poll lock state; called on device open */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Quiesce busy-polling on this EQ: spin until NAPI ownership is acquired,
 * which guarantees no be_busy_poll() is in flight. Called on device close.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support NAPI is the only consumer: locking always
 * succeeds for NAPI and always fails for busy-poll; the rest are no-ops.
 */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2749
/* NAPI poll handler for one EQ: reaps TX completions, processes RX (unless a
 * busy-poller owns the queues), services the MCC queue on its EQ, then either
 * completes NAPI and re-arms the EQ, or leaves it un-armed to keep polling.
 * Returns the rx work done (or @budget if it yielded to busy-poll).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* count events now; they are cleared/acked in the notify below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the rx queues; claim the full budget so
		 * NAPI keeps polling instead of completing
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ (3rd arg true) and ack num_evts events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2789
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency socket busy-poll entry point: process a small batch (budget 4)
 * of rx completions from the first EQ rx queue that has work. Returns the
 * work done, or LL_FLUSH_BUSY if NAPI currently owns the queues.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2811
/* Poll the adapter for fatal hardware/firmware errors.
 * Lancer chips report via SLIPORT registers (doorbell BAR); BE/Skyhawk chips
 * report unrecoverable-error (UE) bits via PCI config space. On a detected
 * error the link carrier is turned off; adapter->hw_error is set only where
 * the HW is known to be truly dead (see comments below).
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* already in error state; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* mask off UE bits the FW says to ignore */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* log the symbolic name of every set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2887
Sathya Perla8d56ff12009-11-22 22:02:26 +00002888static void be_msix_disable(struct be_adapter *adapter)
2889{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002890 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002891 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002892 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302893 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002894 }
2895}
2896
/* Enable MSI-x and split the granted vectors between NIC and RoCE.
 * Returns 0 on success. On failure: returns 0 for a PF (the caller can fall
 * back to INTx) but the negative pci_enable_msix_range() error for a VF,
 * since VFs have no INTx fallback.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* may grant fewer than requested, but at least MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* give RoCE half of the granted vectors, NIC gets the rest */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2940
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002941static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302942 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002943{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302944 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002945}
2946
/* Request one MSI-x IRQ per event queue, naming each "<ifname>-q<i>".
 * On failure, frees the IRQs already requested (walking backwards from the
 * failing EQ) and disables MSI-x. Returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free IRQs for EQs [0, i-1] in reverse order */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2970
/* Register interrupts: MSI-x when enabled, otherwise shared INTx on EQ0.
 * For a VF, an MSI-x registration failure is fatal (no INTx support).
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2998
/* Free the IRQs requested by be_irq_register() (INTx or one per EQ for
 * MSI-x) and clear adapter->isr_registered. Safe to call when nothing was
 * registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
3021
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003022static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003023{
3024 struct be_queue_info *q;
3025 struct be_rx_obj *rxo;
3026 int i;
3027
3028 for_all_rx_queues(adapter, rxo, i) {
3029 q = &rxo->q;
3030 if (q->created) {
3031 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003032 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003033 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003034 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003035 }
3036}
3037
/* ndo_stop handler: quiesce NAPI/busy-poll, drain TX and RX, tear down rx
 * queues and unregister interrupts. The ordering matters: NAPI is stopped
 * before queues are drained, and IRQs are synchronized before EQs are
 * cleaned. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure no ISR is still running for this EQ before
		 * cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3083
/* Allocate and create all rx queues, program the RSS indirection table and
 * hash key (RSS is disabled when only the default RXQ exists), and post the
 * initial rx buffers. Returns 0 or a negative/FW error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table round-robin across the RSS
		 * queues (num_rx_qs - 1 of them; the default RXQ is excluded)
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* keep a copy of the key actually programmed into the HW */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3149
/* ndo_open handler: create rx queues, register IRQs, arm all CQs/EQs, enable
 * NAPI and busy-poll, report link state and start the tx queues. On any
 * failure everything is unwound via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm all rx/tx completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN offload is available only on Skyhawk; learn the currently
	 * configured VxLAN UDP ports from the stack
	 */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3199
/* Enable or disable Wake-on-LAN (magic packet). @enable programs the
 * netdev's MAC as the wake-up address and sets PCI PM wake; disable programs
 * an all-zero MAC and clears PM wake. The DMA command buffer is freed on all
 * paths. Returns 0 or an error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* zero MAC is used to turn magic-packet WoL off */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3239
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003240static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3241{
3242 u32 addr;
3243
3244 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3245
3246 mac[5] = (u8)(addr & 0xFF);
3247 mac[4] = (u8)((addr >> 8) & 0xFF);
3248 mac[3] = (u8)((addr >> 16) & 0xFF);
3249 /* Use the OUI from the current MAC address */
3250 memcpy(mac, adapter->netdev->dev_addr, 3);
3251}
3252
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses the pmac-add interface; newer chips program the
		 * MAC directly on the VF's interface
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next sequential address */
		mac[5] += 1;
	}
	/* NOTE(review): returns only the last VF's status; earlier failures
	 * are logged but not propagated
	 */
	return status;
}
3288
/* Read back each VF's currently active MAC from FW and cache it in the
 * per-VF config (called when the VFs already exist in HW).
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3305
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003306static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003307{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003308 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003309 u32 vf;
3310
Sathya Perla257a3fe2013-06-14 15:54:51 +05303311 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003312 dev_warn(&adapter->pdev->dev,
3313 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003314 goto done;
3315 }
3316
Sathya Perlab4c1df92013-05-08 02:05:47 +00003317 pci_disable_sriov(adapter->pdev);
3318
Sathya Perla11ac75e2011-12-13 00:58:50 +00003319 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303320 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003321 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3322 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303323 else
3324 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3325 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003326
Sathya Perla11ac75e2011-12-13 00:58:50 +00003327 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3328 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003329done:
3330 kfree(adapter->vf_cfg);
3331 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303332 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003333}
3334
/* Destroy all HW queues, in the reverse order of their creation in
 * be_setup_queues().
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3342
/* Cancel the periodic worker if (and only if) it was scheduled; the
 * WORKER_SCHEDULED flag keeps schedule/cancel calls balanced.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
3350
Somnath Koturb05004a2013-12-05 12:08:16 +05303351static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303352{
Somnath Koturb05004a2013-12-05 12:08:16 +05303353 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003354 be_cmd_pmac_del(adapter, adapter->if_handle,
3355 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303356 kfree(adapter->pmac_id);
3357 adapter->pmac_id = NULL;
3358 }
3359}
3360
#ifdef CONFIG_BE2NET_VXLAN
/* Disable VxLAN offloads: revert the interface from tunnel mode, clear
 * the UDP port programmed in FW, and withdraw the tunnel feature flags
 * advertised on the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Stop advertising tunnel segmentation offloads */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303381
/* Undo everything be_setup() did: stop the worker, clear VFs and VxLAN
 * offloads, delete MACs, destroy the interface, all queues and MSI-X.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3410
Kalesh AP0700d812015-01-20 03:51:43 -05003411static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3412 u32 cap_flags, u32 vf)
3413{
3414 u32 en_flags;
3415 int status;
3416
3417 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3418 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3419 BE_IF_FLAGS_RSS;
3420
3421 en_flags &= cap_flags;
3422
3423 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3424 if_handle, vf);
3425
3426 return status;
3427}
3428
/* Create a FW interface for every VF.  On non-BE3 chips the per-VF
 * capability flags come from the FW profile; otherwise a minimal
 * default flag set is used.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			/* On query failure, fall back to the defaults */
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3456
/* Allocate the per-VF config array (num_vfs entries) and initialize
 * each if_handle/pmac_id to -1, i.e. "not created/programmed yet".
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3473
/* Bring up SR-IOV VFs: create (or, when VFs already exist in HW,
 * re-query) per-VF interfaces and MACs, grant filtering privileges,
 * set QoS and link state, and finally enable SR-IOV in the PCI layer.
 * On any failure the partial state is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already enabled in HW: re-learn their interface
		 * handles and MAC addresses instead of creating them.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3548
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303549/* Converting function_mode bits on BE3 to SH mc_type enums */
3550
3551static u8 be_convert_mc_type(u32 function_mode)
3552{
Suresh Reddy66064db2014-06-23 16:41:29 +05303553 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303554 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303555 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303556 return FLEX10;
3557 else if (function_mode & VNIC_MODE)
3558 return vNIC2;
3559 else if (function_mode & UMC_ENABLED)
3560 return UMC;
3561 else
3562 return MC_NONE;
3563}
3564
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable, non-SRIOV PF; otherwise
	 * max_rss_qs keeps its incoming value (zeroed by the caller).
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3632
Sathya Perla30128032011-11-10 19:17:57 +00003633static void be_setup_init(struct be_adapter *adapter)
3634{
3635 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003636 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003637 adapter->if_handle = -1;
3638 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003639 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003640 if (be_physfn(adapter))
3641 adapter->cmd_privileges = MAX_PRIVILEGES;
3642 else
3643 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003644}
3645
/* Determine the SR-IOV limits (PF-pool resources, max VFs) and
 * validate the num_vfs module parameter against them, recording the
 * final VF count in adapter->num_vfs.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI config-space TotalVFs value */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs are already enabled: keep the existing count and
		 * ignore the module parameter if it differs.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3690
/* Populate adapter->res with per-function resource limits: computed
 * locally for BE2/BE3, queried from FW for later chips.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3727
/* Query SR-IOV limits and, when no VFs are enabled yet, ask FW to
 * redistribute the PF-pool resources across the requested num_vfs so
 * each enabled VF gets a larger share.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3756
/* Query FW configuration: function mode/caps, port name, active
 * profile, SR-IOV setup (non-BE2 PFs) and resource limits; allocate
 * the pmac_id table sized to the max UC-MAC count.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3792
/* Establish the primary MAC address.  If the netdev has no address
 * yet, read the permanent MAC from FW; otherwise re-program the
 * existing dev_addr into HW.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3816
/* Start the periodic worker (1s period) and record that it is
 * scheduled so be_cancel_worker() knows to cancel it later.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3822
/* Create all HW queues (event queues first, then TX, RX CQs, MCC) and
 * publish the real RX/TX queue counts to the net stack.  The caller
 * must hold rtnl_lock for the netif_set_real_num_*_queues() calls.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3857
/* Destroy and re-create all queues (e.g. after a queue-count change):
 * close the netdev if it is running, stop the worker, rebuild MSI-X
 * (only when no vectors are shared with RoCE) and the queues, then
 * restart the worker and reopen the netdev.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3893
/* Parse the leading major number out of a "major.minor..." FW version
 * string; returns 0 when no number can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
3904
/* Bring the adapter to a fully configured state: FW config, MSI-X,
 * the interface, queues, MAC/VLAN/RX-mode, flow control and
 * (optionally) SR-IOV VFs.  Any failure unwinds via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn on BE2 with a pre-4.0 firmware */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* If the requested flow-control setting is rejected, read back
	 * what the HW is actually using.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3984
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: notify every event queue and schedule its NAPI
 * context so pending events are processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3998
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303999static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004000
Sathya Perla306f1342011-08-02 19:57:45 +00004001static bool phy_flashing_required(struct be_adapter *adapter)
4002{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004003 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004004 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004005}
4006
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004007static bool is_comp_in_ufi(struct be_adapter *adapter,
4008 struct flash_section_info *fsec, int type)
4009{
4010 int i = 0, img_type = 0;
4011 struct flash_section_info_g2 *fsec_g2 = NULL;
4012
Sathya Perlaca34fe32012-11-06 17:48:56 +00004013 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004014 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4015
4016 for (i = 0; i < MAX_FLASH_COMP; i++) {
4017 if (fsec_g2)
4018 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4019 else
4020 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4021
4022 if (img_type == type)
4023 return true;
4024 }
4025 return false;
4026
4027}
4028
Jingoo Han4188e7d2013-08-05 18:02:02 +09004029static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304030 int header_size,
4031 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004032{
4033 struct flash_section_info *fsec = NULL;
4034 const u8 *p = fw->data;
4035
4036 p += header_size;
4037 while (p < (fw->data + fw->size)) {
4038 fsec = (struct flash_section_info *)p;
4039 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4040 return fsec;
4041 p += 32;
4042 }
4043 return NULL;
4044}
4045
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304046static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4047 u32 img_offset, u32 img_size, int hdr_size,
4048 u16 img_optype, bool *crc_match)
4049{
4050 u32 crc_offset;
4051 int status;
4052 u8 crc[4];
4053
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004054 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4055 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304056 if (status)
4057 return status;
4058
4059 crc_offset = hdr_size + img_offset + img_size - 4;
4060
4061 /* Skip flashing, if crc of flashed region matches */
4062 if (!memcmp(crc, p + crc_offset, 4))
4063 *crc_match = true;
4064 else
4065 *crc_match = false;
4066
4067 return status;
4068}
4069
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004070static int be_flash(struct be_adapter *adapter, const u8 *img,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004071 struct be_dma_mem *flash_cmd, int optype, int img_size,
4072 u32 img_offset)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004073{
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004074 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004075 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304076 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004077
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004078 while (total_bytes) {
4079 num_bytes = min_t(u32, 32*1024, total_bytes);
4080
4081 total_bytes -= num_bytes;
4082
4083 if (!total_bytes) {
4084 if (optype == OPTYPE_PHY_FW)
4085 flash_op = FLASHROM_OPER_PHY_FLASH;
4086 else
4087 flash_op = FLASHROM_OPER_FLASH;
4088 } else {
4089 if (optype == OPTYPE_PHY_FW)
4090 flash_op = FLASHROM_OPER_PHY_SAVE;
4091 else
4092 flash_op = FLASHROM_OPER_SAVE;
4093 }
4094
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004095 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004096 img += num_bytes;
4097 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004098 flash_op, img_offset +
4099 bytes_sent, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05304100 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304101 optype == OPTYPE_PHY_FW)
4102 break;
4103 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004104 return status;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004105
4106 bytes_sent += num_bytes;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004107 }
4108 return 0;
4109}
4110
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004111/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004112static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304113 const struct firmware *fw,
4114 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004115{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004116 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304117 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004118 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304119 int status, i, filehdr_size, num_comp;
4120 const struct flash_comp *pflashcomp;
4121 bool crc_match;
4122 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004123
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004124 struct flash_comp gen3_flash_types[] = {
4125 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4126 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4127 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4128 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4129 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4130 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4131 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4132 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4133 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4134 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4135 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4136 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4137 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4138 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4139 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4140 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4141 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4142 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4143 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4144 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004145 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004146
4147 struct flash_comp gen2_flash_types[] = {
4148 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4149 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4150 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4151 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4152 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4153 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4154 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4155 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4156 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4157 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4158 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4159 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4160 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4161 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4162 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4163 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004164 };
4165
Sathya Perlaca34fe32012-11-06 17:48:56 +00004166 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004167 pflashcomp = gen3_flash_types;
4168 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004169 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004170 } else {
4171 pflashcomp = gen2_flash_types;
4172 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004173 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004174 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004175 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004176
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004177 /* Get flash section info*/
4178 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4179 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304180 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004181 return -1;
4182 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004183 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004184 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004185 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004186
4187 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4188 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4189 continue;
4190
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004191 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4192 !phy_flashing_required(adapter))
4193 continue;
4194
4195 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304196 status = be_check_flash_crc(adapter, fw->data,
4197 pflashcomp[i].offset,
4198 pflashcomp[i].size,
4199 filehdr_size +
4200 img_hdrs_size,
4201 OPTYPE_REDBOOT, &crc_match);
4202 if (status) {
4203 dev_err(dev,
4204 "Could not get CRC for 0x%x region\n",
4205 pflashcomp[i].optype);
4206 continue;
4207 }
4208
4209 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004210 continue;
4211 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004212
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304213 p = fw->data + filehdr_size + pflashcomp[i].offset +
4214 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004215 if (p + pflashcomp[i].size > fw->data + fw->size)
4216 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004217
4218 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004219 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004220 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304221 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004222 pflashcomp[i].img_type);
4223 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004224 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004225 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004226 return 0;
4227}
4228
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304229static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4230{
4231 u32 img_type = le32_to_cpu(fsec_entry.type);
4232 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4233
4234 if (img_optype != 0xFFFF)
4235 return img_optype;
4236
4237 switch (img_type) {
4238 case IMAGE_FIRMWARE_iSCSI:
4239 img_optype = OPTYPE_ISCSI_ACTIVE;
4240 break;
4241 case IMAGE_BOOT_CODE:
4242 img_optype = OPTYPE_REDBOOT;
4243 break;
4244 case IMAGE_OPTION_ROM_ISCSI:
4245 img_optype = OPTYPE_BIOS;
4246 break;
4247 case IMAGE_OPTION_ROM_PXE:
4248 img_optype = OPTYPE_PXE_BIOS;
4249 break;
4250 case IMAGE_OPTION_ROM_FCoE:
4251 img_optype = OPTYPE_FCOE_BIOS;
4252 break;
4253 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4254 img_optype = OPTYPE_ISCSI_BACKUP;
4255 break;
4256 case IMAGE_NCSI:
4257 img_optype = OPTYPE_NCSI_FW;
4258 break;
4259 case IMAGE_FLASHISM_JUMPVECTOR:
4260 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4261 break;
4262 case IMAGE_FIRMWARE_PHY:
4263 img_optype = OPTYPE_SH_PHY_FW;
4264 break;
4265 case IMAGE_REDBOOT_DIR:
4266 img_optype = OPTYPE_REDBOOT_DIR;
4267 break;
4268 case IMAGE_REDBOOT_CONFIG:
4269 img_optype = OPTYPE_REDBOOT_CONFIG;
4270 break;
4271 case IMAGE_UFI_DIR:
4272 img_optype = OPTYPE_UFI_DIR;
4273 break;
4274 default:
4275 break;
4276 }
4277
4278 return img_optype;
4279}
4280
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004281static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304282 const struct firmware *fw,
4283 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004284{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004285 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004286 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304287 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004288 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304289 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004290 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304291 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304292 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004293
4294 filehdr_size = sizeof(struct flash_file_hdr_g3);
4295 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4296 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304297 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304298 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004299 }
4300
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004301retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004302 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4303 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4304 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304305 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4306 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4307 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004308
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304309 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004310 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004311
4312 if (flash_offset_support)
4313 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4314 else
4315 flash_optype = img_optype;
4316
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304317 /* Don't bother verifying CRC if an old FW image is being
4318 * flashed
4319 */
4320 if (old_fw_img)
4321 goto flash;
4322
4323 status = be_check_flash_crc(adapter, fw->data, img_offset,
4324 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004325 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304326 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304327 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4328 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004329 /* The current FW image on the card does not support
4330 * OFFSET based flashing. Retry using older mechanism
4331 * of OPTYPE based flashing
4332 */
4333 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4334 flash_offset_support = false;
4335 goto retry_flash;
4336 }
4337
4338 /* The current FW image on the card does not recognize
4339 * the new FLASH op_type. The FW download is partially
4340 * complete. Reboot the server now to enable FW image
4341 * to recognize the new FLASH op_type. To complete the
4342 * remaining process, download the same FW again after
4343 * the reboot.
4344 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304345 dev_err(dev, "Flash incomplete. Reset the server\n");
4346 dev_err(dev, "Download FW image again after reset\n");
4347 return -EAGAIN;
4348 } else if (status) {
4349 dev_err(dev, "Could not get CRC for 0x%x region\n",
4350 img_optype);
4351 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004352 }
4353
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304354 if (crc_match)
4355 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004356
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304357flash:
4358 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004359 if (p + img_size > fw->data + fw->size)
4360 return -1;
4361
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004362 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4363 img_offset);
4364
4365 /* The current FW image on the card does not support OFFSET
4366 * based flashing. Retry using older mechanism of OPTYPE based
4367 * flashing
4368 */
4369 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4370 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4371 flash_offset_support = false;
4372 goto retry_flash;
4373 }
4374
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304375 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4376 * UFI_DIR region
4377 */
Kalesh AP4c600052014-05-30 19:06:26 +05304378 if (old_fw_img &&
4379 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4380 (img_optype == OPTYPE_UFI_DIR &&
4381 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304382 continue;
4383 } else if (status) {
4384 dev_err(dev, "Flashing section type 0x%x failed\n",
4385 img_type);
4386 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004387 }
4388 }
4389 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004390}
4391
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004392static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304393 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004394{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004395#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4396#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304397 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004398 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004399 const u8 *data_ptr = NULL;
4400 u8 *dest_image_ptr = NULL;
4401 size_t image_size = 0;
4402 u32 chunk_size = 0;
4403 u32 data_written = 0;
4404 u32 offset = 0;
4405 int status = 0;
4406 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004407 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004408
4409 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304410 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304411 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004412 }
4413
4414 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4415 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304416 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004417 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304418 if (!flash_cmd.va)
4419 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004420
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004421 dest_image_ptr = flash_cmd.va +
4422 sizeof(struct lancer_cmd_req_write_object);
4423 image_size = fw->size;
4424 data_ptr = fw->data;
4425
4426 while (image_size) {
4427 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4428
4429 /* Copy the image chunk content. */
4430 memcpy(dest_image_ptr, data_ptr, chunk_size);
4431
4432 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004433 chunk_size, offset,
4434 LANCER_FW_DOWNLOAD_LOCATION,
4435 &data_written, &change_status,
4436 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004437 if (status)
4438 break;
4439
4440 offset += data_written;
4441 data_ptr += data_written;
4442 image_size -= data_written;
4443 }
4444
4445 if (!status) {
4446 /* Commit the FW written */
4447 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004448 0, offset,
4449 LANCER_FW_DOWNLOAD_LOCATION,
4450 &data_written, &change_status,
4451 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004452 }
4453
Kalesh APbb864e02014-09-02 09:56:51 +05304454 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004455 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304456 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304457 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004458 }
4459
Kalesh APbb864e02014-09-02 09:56:51 +05304460 dev_info(dev, "Firmware flashed successfully\n");
4461
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004462 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304463 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004464 status = lancer_physdev_ctrl(adapter,
4465 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004466 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304467 dev_err(dev, "Adapter busy, could not reset FW\n");
4468 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004469 }
4470 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304471 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004472 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304473
4474 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004475}
4476
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004477#define BE2_UFI 2
4478#define BE3_UFI 3
4479#define BE3R_UFI 10
4480#define SH_UFI 4
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004481#define SH_P2_UFI 11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004482
Sathya Perlaca34fe32012-11-06 17:48:56 +00004483static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004484 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004485{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004486 if (!fhdr) {
4487 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4488 return -1;
4489 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004490
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004491 /* First letter of the build version is used to identify
4492 * which chip this image file is meant for.
4493 */
4494 switch (fhdr->build[0]) {
4495 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004496 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4497 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004498 case BLD_STR_UFI_TYPE_BE3:
4499 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4500 BE3_UFI;
4501 case BLD_STR_UFI_TYPE_BE2:
4502 return BE2_UFI;
4503 default:
4504 return -1;
4505 }
4506}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004507
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004508/* Check if the flash image file is compatible with the adapter that
4509 * is being flashed.
4510 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004511 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004512 */
4513static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4514 struct flash_file_hdr_g3 *fhdr)
4515{
4516 int ufi_type = be_get_ufi_type(adapter, fhdr);
4517
4518 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004519 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004520 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004521 case SH_UFI:
4522 return (skyhawk_chip(adapter) &&
4523 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004524 case BE3R_UFI:
4525 return BE3_chip(adapter);
4526 case BE3_UFI:
4527 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4528 case BE2_UFI:
4529 return BE2_chip(adapter);
4530 default:
4531 return false;
4532 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004533}
4534
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004535static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4536{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004537 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004538 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004539 struct image_hdr *img_hdr_ptr;
4540 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004541 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004542
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004543 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4544 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4545 dev_err(dev, "Flash image is not compatible with adapter\n");
4546 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004547 }
4548
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004549 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4550 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4551 GFP_KERNEL);
4552 if (!flash_cmd.va)
4553 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004554
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004555 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4556 for (i = 0; i < num_imgs; i++) {
4557 img_hdr_ptr = (struct image_hdr *)(fw->data +
4558 (sizeof(struct flash_file_hdr_g3) +
4559 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004560 if (!BE2_chip(adapter) &&
4561 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4562 continue;
4563
4564 if (skyhawk_chip(adapter))
4565 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4566 num_imgs);
4567 else
4568 status = be_flash_BEx(adapter, fw, &flash_cmd,
4569 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004570 }
4571
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004572 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4573 if (!status)
4574 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004575
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004576 return status;
4577}
4578
4579int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4580{
4581 const struct firmware *fw;
4582 int status;
4583
4584 if (!netif_running(adapter->netdev)) {
4585 dev_err(&adapter->pdev->dev,
4586 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304587 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004588 }
4589
4590 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4591 if (status)
4592 goto fw_exit;
4593
4594 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4595
4596 if (lancer_chip(adapter))
4597 status = lancer_fw_download(adapter, fw);
4598 else
4599 status = be_fw_download(adapter, fw);
4600
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004601 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304602 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004603
Ajit Khaparde84517482009-09-04 03:12:16 +00004604fw_exit:
4605 release_firmware(fw);
4606 return status;
4607}
4608
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004609static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4610 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004611{
4612 struct be_adapter *adapter = netdev_priv(dev);
4613 struct nlattr *attr, *br_spec;
4614 int rem;
4615 int status = 0;
4616 u16 mode = 0;
4617
4618 if (!sriov_enabled(adapter))
4619 return -EOPNOTSUPP;
4620
4621 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004622 if (!br_spec)
4623 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004624
4625 nla_for_each_nested(attr, br_spec, rem) {
4626 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4627 continue;
4628
Thomas Grafb7c1a312014-11-26 13:42:17 +01004629 if (nla_len(attr) < sizeof(mode))
4630 return -EINVAL;
4631
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004632 mode = nla_get_u16(attr);
4633 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4634 return -EINVAL;
4635
4636 status = be_cmd_set_hsw_config(adapter, 0, 0,
4637 adapter->if_handle,
4638 mode == BRIDGE_MODE_VEPA ?
4639 PORT_FWD_TYPE_VEPA :
4640 PORT_FWD_TYPE_VEB);
4641 if (status)
4642 goto err;
4643
4644 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4645 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4646
4647 return status;
4648 }
4649err:
4650 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4651 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4652
4653 return status;
4654}
4655
4656static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304657 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004658{
4659 struct be_adapter *adapter = netdev_priv(dev);
4660 int status = 0;
4661 u8 hsw_mode;
4662
4663 if (!sriov_enabled(adapter))
4664 return 0;
4665
4666 /* BE and Lancer chips support VEB mode only */
4667 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4668 hsw_mode = PORT_FWD_TYPE_VEB;
4669 } else {
4670 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4671 adapter->if_handle, &hsw_mode);
4672 if (status)
4673 return 0;
4674 }
4675
4676 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4677 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004678 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4679 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004680}
4681
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304682#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004683/* VxLAN offload Notes:
4684 *
4685 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4686 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4687 * is expected to work across all types of IP tunnels once exported. Skyhawk
4688 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304689 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4690 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4691 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004692 *
4693 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4694 * adds more than one port, disable offloads and don't re-enable them again
4695 * until after all the tunnels are removed.
4696 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304697static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4698 __be16 port)
4699{
4700 struct be_adapter *adapter = netdev_priv(netdev);
4701 struct device *dev = &adapter->pdev->dev;
4702 int status;
4703
4704 if (lancer_chip(adapter) || BEx_chip(adapter))
4705 return;
4706
4707 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304708 dev_info(dev,
4709 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004710 dev_info(dev, "Disabling VxLAN offloads\n");
4711 adapter->vxlan_port_count++;
4712 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304713 }
4714
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004715 if (adapter->vxlan_port_count++ >= 1)
4716 return;
4717
Sathya Perlac9c47142014-03-27 10:46:19 +05304718 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4719 OP_CONVERT_NORMAL_TO_TUNNEL);
4720 if (status) {
4721 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4722 goto err;
4723 }
4724
4725 status = be_cmd_set_vxlan_port(adapter, port);
4726 if (status) {
4727 dev_warn(dev, "Failed to add VxLAN port\n");
4728 goto err;
4729 }
4730 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4731 adapter->vxlan_port = port;
4732
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004733 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4734 NETIF_F_TSO | NETIF_F_TSO6 |
4735 NETIF_F_GSO_UDP_TUNNEL;
4736 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304737 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004738
Sathya Perlac9c47142014-03-27 10:46:19 +05304739 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4740 be16_to_cpu(port));
4741 return;
4742err:
4743 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304744}
4745
4746static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4747 __be16 port)
4748{
4749 struct be_adapter *adapter = netdev_priv(netdev);
4750
4751 if (lancer_chip(adapter) || BEx_chip(adapter))
4752 return;
4753
4754 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004755 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304756
4757 be_disable_vxlan_offloads(adapter);
4758
4759 dev_info(&adapter->pdev->dev,
4760 "Disabled VxLAN offloads for UDP port %d\n",
4761 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004762done:
4763 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304764}
Joe Stringer725d5482014-11-13 16:38:13 -08004765
Jesse Gross5f352272014-12-23 22:37:26 -08004766static netdev_features_t be_features_check(struct sk_buff *skb,
4767 struct net_device *dev,
4768 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004769{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304770 struct be_adapter *adapter = netdev_priv(dev);
4771 u8 l4_hdr = 0;
4772
4773 /* The code below restricts offload features for some tunneled packets.
4774 * Offload features for normal (non tunnel) packets are unchanged.
4775 */
4776 if (!skb->encapsulation ||
4777 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4778 return features;
4779
4780 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4781 * should disable tunnel offload features if it's not a VxLAN packet,
4782 * as tunnel offloads have been enabled only for VxLAN. This is done to
4783 * allow other tunneled traffic like GRE work fine while VxLAN
4784 * offloads are configured in Skyhawk-R.
4785 */
4786 switch (vlan_get_protocol(skb)) {
4787 case htons(ETH_P_IP):
4788 l4_hdr = ip_hdr(skb)->protocol;
4789 break;
4790 case htons(ETH_P_IPV6):
4791 l4_hdr = ipv6_hdr(skb)->nexthdr;
4792 break;
4793 default:
4794 return features;
4795 }
4796
4797 if (l4_hdr != IPPROTO_UDP ||
4798 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4799 skb->inner_protocol != htons(ETH_P_TEB) ||
4800 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4801 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4802 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4803
4804 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08004805}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304806#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304807
stephen hemmingere5686ad2012-01-05 19:10:25 +00004808static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004809 .ndo_open = be_open,
4810 .ndo_stop = be_close,
4811 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004812 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004813 .ndo_set_mac_address = be_mac_addr_set,
4814 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004815 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004816 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004817 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4818 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004819 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004820 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004821 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004822 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304823 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004824#ifdef CONFIG_NET_POLL_CONTROLLER
4825 .ndo_poll_controller = be_netpoll,
4826#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004827 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4828 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304829#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304830 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304831#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304832#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304833 .ndo_add_vxlan_port = be_add_vxlan_port,
4834 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08004835 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304836#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004837};
4838
4839static void be_netdev_init(struct net_device *netdev)
4840{
4841 struct be_adapter *adapter = netdev_priv(netdev);
4842
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004843 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004844 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004845 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004846 if (be_multi_rxq(adapter))
4847 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004848
4849 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004850 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004851
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004852 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004853 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004854
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004855 netdev->priv_flags |= IFF_UNICAST_FLT;
4856
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004857 netdev->flags |= IFF_MULTICAST;
4858
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004859 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004860
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004861 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004862
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004863 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004864}
4865
4866static void be_unmap_pci_bars(struct be_adapter *adapter)
4867{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004868 if (adapter->csr)
4869 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004870 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004871 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004872}
4873
Sathya Perlace66f782012-11-06 17:48:58 +00004874static int db_bar(struct be_adapter *adapter)
4875{
4876 if (lancer_chip(adapter) || !be_physfn(adapter))
4877 return 0;
4878 else
4879 return 4;
4880}
4881
4882static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004883{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004884 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004885 adapter->roce_db.size = 4096;
4886 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4887 db_bar(adapter));
4888 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4889 db_bar(adapter));
4890 }
Parav Pandit045508a2012-03-26 14:27:13 +00004891 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004892}
4893
4894static int be_map_pci_bars(struct be_adapter *adapter)
4895{
4896 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004897
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004898 if (BEx_chip(adapter) && be_physfn(adapter)) {
4899 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304900 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004901 return -ENOMEM;
4902 }
4903
Sathya Perlace66f782012-11-06 17:48:58 +00004904 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304905 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004906 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004907 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004908
4909 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004910 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004911
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004912pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304913 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004914 be_unmap_pci_bars(adapter);
4915 return -ENOMEM;
4916}
4917
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004918static void be_ctrl_cleanup(struct be_adapter *adapter)
4919{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004920 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004921
4922 be_unmap_pci_bars(adapter);
4923
4924 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004925 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4926 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004927
Sathya Perla5b8821b2011-08-02 19:57:44 +00004928 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004929 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004930 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4931 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004932}
4933
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004934static int be_ctrl_init(struct be_adapter *adapter)
4935{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004936 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4937 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004938 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004939 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004940 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004941
Sathya Perlace66f782012-11-06 17:48:58 +00004942 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4943 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4944 SLI_INTF_FAMILY_SHIFT;
4945 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4946
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004947 status = be_map_pci_bars(adapter);
4948 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004949 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004950
4951 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004952 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4953 mbox_mem_alloc->size,
4954 &mbox_mem_alloc->dma,
4955 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004956 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004957 status = -ENOMEM;
4958 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004959 }
4960 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4961 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4962 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4963 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004964
Sathya Perla5b8821b2011-08-02 19:57:44 +00004965 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa82013-08-26 22:45:23 -07004966 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4967 rx_filter->size, &rx_filter->dma,
4968 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304969 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004970 status = -ENOMEM;
4971 goto free_mbox;
4972 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004973
Ivan Vecera29849612010-12-14 05:43:19 +00004974 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004975 spin_lock_init(&adapter->mcc_lock);
4976 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004977
Suresh Reddy5eeff632014-01-06 13:02:24 +05304978 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004979 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004980 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004981
4982free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004983 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4984 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004985
4986unmap_pci_bars:
4987 be_unmap_pci_bars(adapter);
4988
4989done:
4990 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004991}
4992
4993static void be_stats_cleanup(struct be_adapter *adapter)
4994{
Sathya Perla3abcded2010-10-03 22:12:27 -07004995 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004996
4997 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004998 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4999 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005000}
5001
5002static int be_stats_init(struct be_adapter *adapter)
5003{
Sathya Perla3abcded2010-10-03 22:12:27 -07005004 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005005
Sathya Perlaca34fe32012-11-06 17:48:56 +00005006 if (lancer_chip(adapter))
5007 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5008 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00005009 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05005010 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00005011 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05005012 else
5013 /* ALL non-BE ASICs */
5014 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00005015
Joe Perchesede23fa82013-08-26 22:45:23 -07005016 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
5017 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05305018 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05305019 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005020 return 0;
5021}
5022
Bill Pemberton3bc6b062012-12-03 09:23:09 -05005023static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005024{
5025 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00005026
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005027 if (!adapter)
5028 return;
5029
Parav Pandit045508a2012-03-26 14:27:13 +00005030 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00005031 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00005032
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005033 cancel_delayed_work_sync(&adapter->func_recovery_work);
5034
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005035 unregister_netdev(adapter->netdev);
5036
Sathya Perla5fb379e2009-06-18 00:02:59 +00005037 be_clear(adapter);
5038
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005039 /* tell fw we're done with firing cmds */
5040 be_cmd_fw_clean(adapter);
5041
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005042 be_stats_cleanup(adapter);
5043
5044 be_ctrl_cleanup(adapter);
5045
Sathya Perlad6b6d982012-09-05 01:56:48 +00005046 pci_disable_pcie_error_reporting(pdev);
5047
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005048 pci_release_regions(pdev);
5049 pci_disable_device(pdev);
5050
5051 free_netdev(adapter->netdev);
5052}
5053
Sathya Perla39f1d942012-05-08 19:41:24 +00005054static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005055{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05305056 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00005057
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00005058 status = be_cmd_get_cntl_attributes(adapter);
5059 if (status)
5060 return status;
5061
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005062 /* Must be a power of 2 or else MODULO will BUG_ON */
5063 adapter->be_get_temp_freq = 64;
5064
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05305065 if (BEx_chip(adapter)) {
5066 level = be_cmd_get_fw_log_level(adapter);
5067 adapter->msg_enable =
5068 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
5069 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00005070
Sathya Perla92bf14a2013-08-27 16:57:32 +05305071 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00005072 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005073}
5074
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005075static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005076{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005077 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005078 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005079
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005080 status = lancer_test_and_set_rdy_state(adapter);
5081 if (status)
5082 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005083
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005084 if (netif_running(adapter->netdev))
5085 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005086
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005087 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005088
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005089 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005090
5091 status = be_setup(adapter);
5092 if (status)
5093 goto err;
5094
5095 if (netif_running(adapter->netdev)) {
5096 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005097 if (status)
5098 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005099 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005100
Somnath Kotur4bebb562013-12-05 12:07:55 +05305101 dev_err(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005102 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005103err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005104 if (status == -EAGAIN)
5105 dev_err(dev, "Waiting for resource provisioning\n");
5106 else
Somnath Kotur4bebb562013-12-05 12:07:55 +05305107 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005108
5109 return status;
5110}
5111
/* Periodic (1s) recovery worker: poll for HW errors and, on Lancer,
 * attempt an automatic function recovery. Reschedules itself unless a
 * Lancer recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl so the stack stops using the device
		 * while it is being rebuilt.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
5137
Vasundhara Volam21252372015-02-06 08:18:42 -05005138static void be_log_sfp_info(struct be_adapter *adapter)
5139{
5140 int status;
5141
5142 status = be_cmd_query_sfp_info(adapter);
5143 if (!status) {
5144 dev_err(&adapter->pdev->dev,
5145 "Unqualified SFP+ detected on %c from %s part no: %s",
5146 adapter->port_name, adapter->phy.vendor_name,
5147 adapter->phy.vendor_pn);
5148 }
5149 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5150}
5151
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005152static void be_worker(struct work_struct *work)
5153{
5154 struct be_adapter *adapter =
5155 container_of(work, struct be_adapter, work.work);
5156 struct be_rx_obj *rxo;
5157 int i;
5158
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005159 /* when interrupts are not yet enabled, just reap any pending
5160 * mcc completions */
5161 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005162 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005163 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005164 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005165 goto reschedule;
5166 }
5167
5168 if (!adapter->stats_cmd_sent) {
5169 if (lancer_chip(adapter))
5170 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305171 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005172 else
5173 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5174 }
5175
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305176 if (be_physfn(adapter) &&
5177 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005178 be_cmd_get_die_temperature(adapter);
5179
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005180 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305181 /* Replenish RX-queues starved due to memory
5182 * allocation failures.
5183 */
5184 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305185 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005186 }
5187
Sathya Perla2632baf2013-10-01 16:00:00 +05305188 be_eqd_update(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005189
Vasundhara Volam21252372015-02-06 08:18:42 -05005190 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5191 be_log_sfp_info(adapter);
5192
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005193reschedule:
5194 adapter->work_counter++;
5195 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5196}
5197
Sathya Perla257a3fe2013-06-14 15:54:51 +05305198/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00005199static bool be_reset_required(struct be_adapter *adapter)
5200{
Sathya Perla257a3fe2013-06-14 15:54:51 +05305201 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00005202}
5203
Sathya Perlad3791422012-09-28 04:39:44 +00005204static char *mc_name(struct be_adapter *adapter)
5205{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305206 char *str = ""; /* default */
5207
5208 switch (adapter->mc_type) {
5209 case UMC:
5210 str = "UMC";
5211 break;
5212 case FLEX10:
5213 str = "FLEX10";
5214 break;
5215 case vNIC1:
5216 str = "vNIC-1";
5217 break;
5218 case nPAR:
5219 str = "nPAR";
5220 break;
5221 case UFP:
5222 str = "UFP";
5223 break;
5224 case vNIC2:
5225 str = "vNIC-2";
5226 break;
5227 default:
5228 str = "";
5229 }
5230
5231 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005232}
5233
/* Return "PF" or "VF" depending on this adapter's function type. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5238
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005239static inline char *nic_name(struct pci_dev *pdev)
5240{
5241 switch (pdev->device) {
5242 case OC_DEVICE_ID1:
5243 return OC_NAME;
5244 case OC_DEVICE_ID2:
5245 return OC_NAME_BE;
5246 case OC_DEVICE_ID3:
5247 case OC_DEVICE_ID4:
5248 return OC_NAME_LANCER;
5249 case BE_DEVICE_ID2:
5250 return BE3_NAME;
5251 case OC_DEVICE_ID5:
5252 case OC_DEVICE_ID6:
5253 return OC_NAME_SH;
5254 default:
5255 return BE_NAME;
5256 }
5257}
5258
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005259static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005260{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005261 struct be_adapter *adapter;
5262 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005263 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005264
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305265 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5266
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005267 status = pci_enable_device(pdev);
5268 if (status)
5269 goto do_none;
5270
5271 status = pci_request_regions(pdev, DRV_NAME);
5272 if (status)
5273 goto disable_dev;
5274 pci_set_master(pdev);
5275
Sathya Perla7f640062012-06-05 19:37:20 +00005276 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305277 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005278 status = -ENOMEM;
5279 goto rel_reg;
5280 }
5281 adapter = netdev_priv(netdev);
5282 adapter->pdev = pdev;
5283 pci_set_drvdata(pdev, adapter);
5284 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005285 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005286
Russell King4c15c242013-06-26 23:49:11 +01005287 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005288 if (!status) {
5289 netdev->features |= NETIF_F_HIGHDMA;
5290 } else {
Russell King4c15c242013-06-26 23:49:11 +01005291 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005292 if (status) {
5293 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5294 goto free_netdev;
5295 }
5296 }
5297
Kalesh AP2f951a92014-09-12 17:39:21 +05305298 status = pci_enable_pcie_error_reporting(pdev);
5299 if (!status)
5300 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005301
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005302 status = be_ctrl_init(adapter);
5303 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005304 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005305
Sathya Perla2243e2e2009-11-22 22:02:03 +00005306 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00005307 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005308 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00005309 if (status)
5310 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00005311 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00005312
Sathya Perla39f1d942012-05-08 19:41:24 +00005313 if (be_reset_required(adapter)) {
5314 status = be_cmd_reset_function(adapter);
5315 if (status)
5316 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07005317
Kalesh AP2d177be2013-04-28 22:22:29 +00005318 /* Wait for interrupts to quiesce after an FLR */
5319 msleep(100);
5320 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00005321
5322 /* Allow interrupts for other ULPs running on NIC function */
5323 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005324
Kalesh AP2d177be2013-04-28 22:22:29 +00005325 /* tell fw we're ready to fire cmds */
5326 status = be_cmd_fw_init(adapter);
5327 if (status)
5328 goto ctrl_clean;
5329
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005330 status = be_stats_init(adapter);
5331 if (status)
5332 goto ctrl_clean;
5333
Sathya Perla39f1d942012-05-08 19:41:24 +00005334 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005335 if (status)
5336 goto stats_clean;
5337
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005338 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005339 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Kalesh AP5f820b62014-09-19 15:47:01 +05305340 adapter->rx_fc = true;
5341 adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005342
Sathya Perla5fb379e2009-06-18 00:02:59 +00005343 status = be_setup(adapter);
5344 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00005345 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005346
Sathya Perla3abcded2010-10-03 22:12:27 -07005347 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005348 status = register_netdev(netdev);
5349 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005350 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005351
Parav Pandit045508a2012-03-26 14:27:13 +00005352 be_roce_dev_add(adapter);
5353
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005354 schedule_delayed_work(&adapter->func_recovery_work,
5355 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005356
Sathya Perlad3791422012-09-28 04:39:44 +00005357 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005358 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005359
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005360 return 0;
5361
Sathya Perla5fb379e2009-06-18 00:02:59 +00005362unsetup:
5363 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005364stats_clean:
5365 be_stats_cleanup(adapter);
5366ctrl_clean:
5367 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005368free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005369 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005370rel_reg:
5371 pci_release_regions(pdev);
5372disable_dev:
5373 pci_disable_device(pdev);
5374do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005375 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005376 return status;
5377}
5378
/* PM suspend callback: optionally arm wake-on-LAN, quiesce interrupts
 * and the recovery worker, close and clear the device, then put the PCI
 * device into the requested low-power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl, as ndo_stop would */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5403
/* PM resume callback: re-enable the PCI device, reset and re-init the
 * function, rebuild driver state with be_setup(), reopen the interface
 * if it was up, restart the recovery worker, and disarm wake-on-LAN.
 * Returns 0 or a negative errno from an intermediate step.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		/* be_open() must run under rtnl, as ndo_open would */
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5449
Sathya Perla82456b02010-02-17 01:35:37 +00005450/*
5451 * An FLR will stop BE from DMAing any data.
5452 */
5453static void be_shutdown(struct pci_dev *pdev)
5454{
5455 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005456
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005457 if (!adapter)
5458 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005459
Devesh Sharmad114f992014-06-10 19:32:15 +05305460 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005461 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005462 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005463
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005464 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005465
Ajit Khaparde57841862011-04-06 18:08:43 +00005466 be_cmd_reset_function(adapter);
5467
Sathya Perla82456b02010-02-17 01:35:37 +00005468 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005469}
5470
Sathya Perlacf588472010-02-14 21:22:01 +00005471static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305472 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005473{
5474 struct be_adapter *adapter = pci_get_drvdata(pdev);
5475 struct net_device *netdev = adapter->netdev;
5476
5477 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5478
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005479 if (!adapter->eeh_error) {
5480 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00005481
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005482 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005483
Sathya Perlacf588472010-02-14 21:22:01 +00005484 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005485 netif_device_detach(netdev);
5486 if (netif_running(netdev))
5487 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005488 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005489
5490 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005491 }
Sathya Perlacf588472010-02-14 21:22:01 +00005492
5493 if (state == pci_channel_io_perm_failure)
5494 return PCI_ERS_RESULT_DISCONNECT;
5495
5496 pci_disable_device(pdev);
5497
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005498 /* The error could cause the FW to trigger a flash debug dump.
5499 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005500 * can cause it not to recover; wait for it to finish.
5501 * Wait only for first function as it is needed only once per
5502 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005503 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005504 if (pdev->devfn == 0)
5505 ssleep(30);
5506
Sathya Perlacf588472010-02-14 21:22:01 +00005507 return PCI_ERS_RESULT_NEED_RESET;
5508}
5509
/* EEH callback: the slot has been reset.  Re-enable the device, wait for
 * FW readiness and clear latched error state before reporting recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear the sticky flags latched in be_eeh_err_detected() */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5536
/* EEH callback: traffic may flow again.  Rebuild FW/driver state and
 * reattach the netdev; on any failure the device is left detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic health/recovery worker */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5579
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07005580static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00005581 .error_detected = be_eeh_err_detected,
5582 .slot_reset = be_eeh_reset,
5583 .resume = be_eeh_resume,
5584};
5585
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005586static struct pci_driver be_driver = {
5587 .name = DRV_NAME,
5588 .id_table = be_dev_ids,
5589 .probe = be_probe,
5590 .remove = be_remove,
5591 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00005592 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005593 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00005594 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005595};
5596
5597static int __init be_init_module(void)
5598{
Joe Perches8e95a202009-12-03 07:58:21 +00005599 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5600 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005601 printk(KERN_WARNING DRV_NAME
5602 " : Module param rx_frag_size must be 2048/4096/8192."
5603 " Using 2048\n");
5604 rx_frag_size = 2048;
5605 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005606
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005607 return pci_register_driver(&be_driver);
5608}
5609module_init(be_init_module);
5610
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for any bound devices).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);