blob: 20305e1e0ec4955e923faeb8f7a38bd36c4491d4 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* Number of PCI virtual functions to create at probe time (0 = none).
 * Read-only via sysfs (S_IRUGO); cannot be changed after load.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the NIC; default 2048 bytes. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Benoit Taine9baa3c32014-08-08 15:56:03 +020041static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000053/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070054static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000055 "CEV",
56 "CTX",
57 "DBUF",
58 "ERX",
59 "Host",
60 "MPU",
61 "NDMA",
62 "PTC ",
63 "RDMA ",
64 "RXF ",
65 "RXIPS ",
66 "RXULP0 ",
67 "RXULP1 ",
68 "RXULP2 ",
69 "TIM ",
70 "TPOST ",
71 "TPRE ",
72 "TXIPS ",
73 "TXULP0 ",
74 "TXULP1 ",
75 "UC ",
76 "WDMA ",
77 "TXULP2 ",
78 "HOST1 ",
79 "P0_OB_LINK ",
80 "P1_OB_LINK ",
81 "HOST_GPIO ",
82 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053083 "ERX2 ",
84 "SPARE ",
85 "JTAG ",
86 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000087};
Kalesh APe2fb1af2014-09-19 15:46:58 +053088
Ajit Khaparde7c185272010-07-29 06:16:33 +000089/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070090static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000091 "LPCMEMHOST",
92 "MGMT_MAC",
93 "PCS0ONLINE",
94 "MPU_IRAM",
95 "PCS1ONLINE",
96 "PCTL0",
97 "PCTL1",
98 "PMEM",
99 "RR",
100 "TXPB",
101 "RXPP",
102 "XAUI",
103 "TXP",
104 "ARM",
105 "IPC",
106 "HOST2",
107 "HOST3",
108 "HOST4",
109 "HOST5",
110 "HOST6",
111 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530112 "ECRC",
113 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700114 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "PERIPH",
116 "LLTXULP",
117 "D2P",
118 "RCON",
119 "LDMA",
120 "LLTXP",
121 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000122 "Unknown"
123};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530128
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530190
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191 val |= qid & DB_RQ_RING_ID_MASK;
192 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000198static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
199 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700200{
201 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530202
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000203 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700204 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000205
206 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000207 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208}
209
Sathya Perla8788fdc2009-07-27 22:52:03 +0000210static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530211 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212{
213 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530214
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000217
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000218 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000219 return;
220
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700221 if (arm)
222 val |= 1 << DB_EQ_REARM_SHIFT;
223 if (clear_int)
224 val |= 1 << DB_EQ_CLR_SHIFT;
225 val |= 1 << DB_EQ_EVNT_SHIFT;
226 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228}
229
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231{
232 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530233
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530250 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700251 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530252 int status;
253 u8 mac[ETH_ALEN];
254 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530259 /* Proceed further only if, User provided MAC is different
260 * from active MAC
261 */
262 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
263 return 0;
264
Sathya Perla5a712c12013-07-23 15:24:59 +0530265 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
266 * privilege or if PF did not provision the new MAC address.
267 * On BE3, this cmd will always fail if the VF doesn't have the
268 * FILTMGMT privilege. This failure is OK, only if the PF programmed
269 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000270 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530271 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
272 adapter->if_handle, &adapter->pmac_id[0], 0);
273 if (!status) {
274 curr_pmac_id = adapter->pmac_id[0];
275
276 /* Delete the old programmed MAC. This call may fail if the
277 * old MAC was already deleted by the PF driver.
278 */
279 if (adapter->pmac_id[0] != old_pmac_id)
280 be_cmd_pmac_del(adapter, adapter->if_handle,
281 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000282 }
283
Sathya Perla5a712c12013-07-23 15:24:59 +0530284 /* Decide if the new MAC is successfully activated only after
285 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000286 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530287 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
288 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000289 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000290 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700291
Sathya Perla5a712c12013-07-23 15:24:59 +0530292 /* The MAC change did not happen, either due to lack of privilege
293 * or PF didn't pre-provision.
294 */
dingtianhong61d23e92013-12-30 15:40:43 +0800295 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530296 status = -EPERM;
297 goto err;
298 }
299
Somnath Koture3a7ae22011-10-27 07:14:05 +0000300 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530301 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000302 return 0;
303err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530304 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700305 return status;
306}
307
Sathya Perlaca34fe32012-11-06 17:48:56 +0000308/* BE2 supports only v0 cmd */
309static void *hw_stats_from_cmd(struct be_adapter *adapter)
310{
311 if (BE2_chip(adapter)) {
312 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000316 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500319 } else {
320 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
321
322 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323 }
324}
325
326/* BE2 supports only v0 cmd */
327static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
328{
329 if (BE2_chip(adapter)) {
330 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500337 } else {
338 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
339
340 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000341 }
342}
343
344static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000345{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
347 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
348 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000349 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000350 &rxf_stats->port[adapter->port_num];
351 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 drvs->rx_pause_frames = port_stats->rx_pause_frames;
355 drvs->rx_crc_errors = port_stats->rx_crc_errors;
356 drvs->rx_control_frames = port_stats->rx_control_frames;
357 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
358 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
359 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
360 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
361 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
362 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
363 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
364 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
365 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
366 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
367 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_dropped_header_too_small =
370 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000371 drvs->rx_address_filtered =
372 port_stats->rx_address_filtered +
373 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_alignment_symbol_errors =
375 port_stats->rx_alignment_symbol_errors;
376
377 drvs->tx_pauseframes = port_stats->tx_pauseframes;
378 drvs->tx_controlframes = port_stats->tx_controlframes;
379
380 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000381 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000382 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000383 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->forwarded_packets = rxf_stats->forwarded_packets;
387 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
389 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
391}
392
Sathya Perlaca34fe32012-11-06 17:48:56 +0000393static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000395 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
396 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
397 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000398 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000399 &rxf_stats->port[adapter->port_num];
400 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401
Sathya Perlaac124ff2011-07-25 19:10:14 +0000402 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000403 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
404 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 drvs->rx_pause_frames = port_stats->rx_pause_frames;
406 drvs->rx_crc_errors = port_stats->rx_crc_errors;
407 drvs->rx_control_frames = port_stats->rx_control_frames;
408 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
409 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
410 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
411 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
412 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
413 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
414 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
415 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
416 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
417 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
418 drvs->rx_dropped_header_too_small =
419 port_stats->rx_dropped_header_too_small;
420 drvs->rx_input_fifo_overflow_drop =
421 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000422 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000423 drvs->rx_alignment_symbol_errors =
424 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000425 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426 drvs->tx_pauseframes = port_stats->tx_pauseframes;
427 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000428 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000429 drvs->jabber_events = port_stats->jabber_events;
430 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->forwarded_packets = rxf_stats->forwarded_packets;
433 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000434 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
435 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
437}
438
Ajit Khaparde61000862013-10-03 16:16:33 -0500439static void populate_be_v2_stats(struct be_adapter *adapter)
440{
441 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
442 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
443 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
444 struct be_port_rxf_stats_v2 *port_stats =
445 &rxf_stats->port[adapter->port_num];
446 struct be_drv_stats *drvs = &adapter->drv_stats;
447
448 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
449 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
450 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
451 drvs->rx_pause_frames = port_stats->rx_pause_frames;
452 drvs->rx_crc_errors = port_stats->rx_crc_errors;
453 drvs->rx_control_frames = port_stats->rx_control_frames;
454 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
455 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
456 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
457 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
458 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
459 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
460 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
461 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
462 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
463 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
464 drvs->rx_dropped_header_too_small =
465 port_stats->rx_dropped_header_too_small;
466 drvs->rx_input_fifo_overflow_drop =
467 port_stats->rx_input_fifo_overflow_drop;
468 drvs->rx_address_filtered = port_stats->rx_address_filtered;
469 drvs->rx_alignment_symbol_errors =
470 port_stats->rx_alignment_symbol_errors;
471 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
472 drvs->tx_pauseframes = port_stats->tx_pauseframes;
473 drvs->tx_controlframes = port_stats->tx_controlframes;
474 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
475 drvs->jabber_events = port_stats->jabber_events;
476 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
477 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
478 drvs->forwarded_packets = rxf_stats->forwarded_packets;
479 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
480 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
481 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
482 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530483 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500484 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
485 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
486 drvs->rx_roce_frames = port_stats->roce_frames_received;
487 drvs->roce_drops_crc = port_stats->roce_drops_crc;
488 drvs->roce_drops_payload_len =
489 port_stats->roce_drops_payload_len;
490 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500491}
492
Selvin Xavier005d5692011-05-16 07:36:35 +0000493static void populate_lancer_stats(struct be_adapter *adapter)
494{
Selvin Xavier005d5692011-05-16 07:36:35 +0000495 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530496 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000497
498 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
499 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
500 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
501 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000502 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000503 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000504 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
505 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
506 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
507 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
508 drvs->rx_dropped_tcp_length =
509 pport_stats->rx_dropped_invalid_tcp_length;
510 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
511 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
512 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
513 drvs->rx_dropped_header_too_small =
514 pport_stats->rx_dropped_header_too_small;
515 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000516 drvs->rx_address_filtered =
517 pport_stats->rx_address_filtered +
518 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000519 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000520 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
522 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000523 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 drvs->forwarded_packets = pport_stats->num_forwards_lo;
525 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000526 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000527 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000528}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000529
Sathya Perla09c1c682011-08-22 19:41:53 +0000530static void accumulate_16bit_val(u32 *acc, u16 val)
531{
532#define lo(x) (x & 0xFFFF)
533#define hi(x) (x & 0xFFFF0000)
534 bool wrapped = val < lo(*acc);
535 u32 newacc = hi(*acc) + val;
536
537 if (wrapped)
538 newacc += 65536;
539 ACCESS_ONCE(*acc) = newacc;
540}
541
Jingoo Han4188e7d2013-08-05 18:02:02 +0900542static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530543 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000544{
545 if (!BEx_chip(adapter))
546 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
547 else
548 /* below erx HW counter can actually wrap around after
549 * 65535. Driver accumulates a 32-bit value
550 */
551 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
552 (u16)erx_stat);
553}
554
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000555void be_parse_stats(struct be_adapter *adapter)
556{
Ajit Khaparde61000862013-10-03 16:16:33 -0500557 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558 struct be_rx_obj *rxo;
559 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000560 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000561
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (lancer_chip(adapter)) {
563 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000564 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000565 if (BE2_chip(adapter))
566 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else if (BE3_chip(adapter))
568 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000569 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 else
571 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000572
Ajit Khaparde61000862013-10-03 16:16:33 -0500573 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000575 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
576 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000578 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000579}
580
/* ndo_get_stats64 handler: aggregate per-queue SW counters and per-function
 * driver stats into @stats. Per-queue 64-bit counters are read under their
 * u64_stats seqcount so values are consistent even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
648
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000649void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700650{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 struct net_device *netdev = adapter->netdev;
652
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000653 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000654 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000655 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000657
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530658 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000659 netif_carrier_on(netdev);
660 else
661 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662}
663
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500664static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla3c8def92011-06-12 20:01:58 +0000666 struct be_tx_stats *stats = tx_stats(txo);
667
Sathya Perlaab1594e2011-07-25 19:10:15 +0000668 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000669 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500670 stats->tx_bytes += skb->len;
671 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500675/* Returns number of WRBs needed for the skb */
676static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500678 /* +1 for the header wrb */
679 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
682static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
683{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500684 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
685 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
686 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
687 wrb->rsvd0 = 0;
688}
689
690/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
691 * to avoid the swap and shift/mask operations in wrb_fill().
692 */
693static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
694{
695 wrb->frag_pa_hi = 0;
696 wrb->frag_pa_lo = 0;
697 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000698 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700699}
700
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000701static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530702 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000703{
704 u8 vlan_prio;
705 u16 vlan_tag;
706
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100707 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
709 /* If vlan priority provided by OS is NOT in available bmap */
710 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
711 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
712 adapter->recommended_prio;
713
714 return vlan_tag;
715}
716
Sathya Perlac9c47142014-03-27 10:46:19 +0530717/* Used only for IP tunnel packets */
718static u16 skb_inner_ip_proto(struct sk_buff *skb)
719{
720 return (inner_ip_hdr(skb)->version == 4) ?
721 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
722}
723
724static u16 skb_ip_proto(struct sk_buff *skb)
725{
726 return (ip_hdr(skb)->version == 4) ?
727 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
728}
729
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530730static inline bool be_is_txq_full(struct be_tx_obj *txo)
731{
732 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
733}
734
735static inline bool be_can_txq_wake(struct be_tx_obj *txo)
736{
737 return atomic_read(&txo->q.used) < txo->q.len / 2;
738}
739
740static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
741{
742 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
743}
744
/* Derive TX WRB feature flags (LSO, csum offloads, VLAN) and values
 * (mss, vlan tag) from the skb into @wrb_params; wrb_fill_hdr() later
 * encodes them into the header WRB.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* the LSO6 hint is not set for Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* for tunnelled pkts the csum offload applies to inner hdrs */
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* CRC insertion is requested for every pkt */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500776
/* Encode the features/values gathered in @wrb_params into the header
 * WRB at the bit positions the HW expects.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}
811
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000812static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530813 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000814{
815 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500816 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000817
Sathya Perla7101e112010-03-22 20:41:12 +0000818
Sathya Perlaf986afc2015-02-06 08:18:43 -0500819 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
820 (u64)le32_to_cpu(wrb->frag_pa_lo);
821 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000822 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500823 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000824 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500825 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000826 }
827}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700828
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530829/* Grab a WRB header for xmit */
830static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700831{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530832 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530834 queue_head_inc(&txo->q);
835 return head;
836}
837
/* Set up the WRB header for xmit: encode wrb_params into the header WRB
 * reserved at @head and update the queue's SW bookkeeping.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* the HW consumes the hdr WRB in little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* remember the skb at the hdr index so TX completion can free it */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530859/* Setup a WRB fragment (buffer descriptor) for xmit */
860static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
861 int len)
862{
863 struct be_eth_wrb *wrb;
864 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700865
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530866 wrb = queue_head_node(txq);
867 wrb_fill(wrb, busaddr, len);
868 queue_head_inc(txq);
869}
870
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the failed pkt's hdr WRB so its frag WRBs can be walked */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first frag (skb linear area) may have been mapped
		 * with dma_map_single(); the rest are page mappings
		 */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index; the pkt's WRBs are free again */
	txq->head = head;
}
898
899/* Enqueue the given packet for transmit. This routine allocates WRBs for the
900 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
901 * of WRBs used up by the packet.
902 */
903static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
904 struct sk_buff *skb,
905 struct be_wrb_params *wrb_params)
906{
907 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
908 struct device *dev = &adapter->pdev->dev;
909 struct be_queue_info *txq = &txo->q;
910 bool map_single = false;
911 u16 head = txq->head;
912 dma_addr_t busaddr;
913 int len;
914
915 head = be_tx_get_wrb_hdr(txo);
916
917 if (skb->len > skb->data_len) {
918 len = skb_headlen(skb);
919
920 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
921 if (dma_mapping_error(dev, busaddr))
922 goto dma_err;
923 map_single = true;
924 be_tx_setup_wrb_frag(txo, busaddr, len);
925 copied += len;
926 }
927
928 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
929 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
930 len = skb_frag_size(frag);
931
932 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
933 if (dma_mapping_error(dev, busaddr))
934 goto dma_err;
935 be_tx_setup_wrb_frag(txo, busaddr, len);
936 copied += len;
937 }
938
939 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
940
941 be_tx_stats_update(txo, skb);
942 return wrb_cnt;
943
944dma_err:
945 adapter->drv_stats.dma_map_errors++;
946 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000947 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700948}
949
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500950static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
951{
952 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
953}
954
/* Insert the VLAN tag(s) into the pkt data itself instead of relying on
 * HW insertion: first the inner tag (from the skb, or pvid in QnQ mode),
 * then the outer QnQ tag if configured. Returns the (possibly
 * reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* get a private copy if the skb is shared; it will be modified */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* the tag now lives in the pkt data; clear it from the skb */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
998
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000999static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1000{
1001 struct ethhdr *eh = (struct ethhdr *)skb->data;
1002 u16 offset = ETH_HLEN;
1003
1004 if (eh->h_proto == htons(ETH_P_IPV6)) {
1005 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1006
1007 offset += sizeof(struct ipv6hdr);
1008 if (ip6h->nexthdr != NEXTHDR_TCP &&
1009 ip6h->nexthdr != NEXTHDR_UDP) {
1010 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301011 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001012
1013 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1014 if (ehdr->hdrlen == 0xff)
1015 return true;
1016 }
1017 }
1018 return false;
1019}
1020
1021static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1022{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001023 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001024}
1025
Sathya Perla748b5392014-05-09 13:29:13 +05301026static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001027{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001028 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001029}
1030
/* BEx/Lancer TX HW workarounds: trim padded pkts whose IP tot_len the
 * HW would mis-edit, and fall back to SW VLAN insertion where HW
 * tagging can compute a bad csum or lock up the ASIC. Returns the
 * (possibly modified/reallocated) skb, or NULL if it was dropped/freed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the pad bytes: trim to the length the IP hdr claims */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1099
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301100static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1101 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301102 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301103{
1104 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1105 * less may cause a transmit stall on that port. So the work-around is
1106 * to pad short packets (<= 32 bytes) to a 36-byte length.
1107 */
1108 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001109 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301110 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301111 }
1112
1113 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301114 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301115 if (!skb)
1116 return NULL;
1117 }
1118
1119 return skb;
1120}
1121
/* Notify the HW of all WRBs queued since the last flush: make sure the
 * final request is eventable, and on non-Lancer chips append a dummy
 * WRB when an odd number of WRBs would otherwise be rung.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* account the dummy in the last request's num_wrb field */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1145
/* ndo_start_xmit handler: apply HW workarounds, enqueue the skb's WRBs,
 * manage subqueue flow control and ring the doorbell when flushing.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* defer the doorbell while the stack promises more pkts */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* enqueue failed (DMA mapping error); queue was rolled back */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the subqueue before the next pkt could overflow it */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1184
1185static int be_change_mtu(struct net_device *netdev, int new_mtu)
1186{
1187 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301188 struct device *dev = &adapter->pdev->dev;
1189
1190 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1191 dev_info(dev, "MTU must be between %d and %d bytes\n",
1192 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001193 return -EINVAL;
1194 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301195
1196 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301197 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198 netdev->mtu = new_mtu;
1199 return 0;
1200}
1201
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001202static inline bool be_in_all_promisc(struct be_adapter *adapter)
1203{
1204 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1205 BE_IF_FLAGS_ALL_PROMISCUOUS;
1206}
1207
1208static int be_set_vlan_promisc(struct be_adapter *adapter)
1209{
1210 struct device *dev = &adapter->pdev->dev;
1211 int status;
1212
1213 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1214 return 0;
1215
1216 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1217 if (!status) {
1218 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1219 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1220 } else {
1221 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1222 }
1223 return status;
1224}
1225
1226static int be_clear_vlan_promisc(struct be_adapter *adapter)
1227{
1228 struct device *dev = &adapter->pdev->dev;
1229 int status;
1230
1231 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1232 if (!status) {
1233 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1234 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1235 }
1236 return status;
1237}
1238
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* too many vids for the HW filter table: fall back to vlan promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* filtering programmed OK; vlan promisc no longer needed */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1273
/* ndo_vlan_rx_add_vid handler: record @vid and reprogram the HW VLAN
 * table, rolling the SW bookkeeping back if programming fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* already configured: nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* undo the SW state changed above */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1297
Patrick McHardy80d5c362013-04-19 02:04:28 +00001298static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299{
1300 struct be_adapter *adapter = netdev_priv(netdev);
1301
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001302 /* Packets with VID 0 are always received by Lancer by default */
1303 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301304 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001305
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301306 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301307 adapter->vlans_added--;
1308
1309 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001310}
1311
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001312static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301313{
Sathya Perlaac34b742015-02-06 08:18:40 -05001314 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001315 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1316}
1317
1318static void be_set_all_promisc(struct be_adapter *adapter)
1319{
1320 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1321 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1322}
1323
1324static void be_set_mc_promisc(struct be_adapter *adapter)
1325{
1326 int status;
1327
1328 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1329 return;
1330
1331 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1332 if (!status)
1333 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1334}
1335
1336static void be_set_mc_list(struct be_adapter *adapter)
1337{
1338 int status;
1339
1340 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1341 if (!status)
1342 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1343 else
1344 be_set_mc_promisc(adapter);
1345}
1346
/* Re-program the unicast MAC filter list from the netdev's UC addresses.
 * Slot 0 of pmac_id[] is reserved for the primary MAC; additional UC MACs
 * occupy slots 1..uc_macs. If the netdev has more UC addresses than the
 * HW supports, the interface falls back to full promiscuous mode.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete every previously programmed extra UC MAC; the loop also
	 * counts uc_macs back down to 0.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Program each UC address; uc_macs is pre-incremented so the
	 * index skips slot 0 (primary MAC).
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1367
1368static void be_clear_uc_list(struct be_adapter *adapter)
1369{
1370 int i;
1371
1372 for (i = 1; i < (adapter->uc_macs + 1); i++)
1373 be_cmd_pmac_del(adapter, adapter->if_handle,
1374 adapter->pmac_id[i], 0);
1375 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301376}
1377
/* ndo_set_rx_mode handler: syncs the netdev's promisc/multicast/unicast
 * state to the adapter's HW RX filters. The order of checks matters:
 * full promisc short-circuits everything else.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC filters only when the count actually changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1406
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * BEx chips can only do this via a pmac delete/add cycle; newer chips
 * have a direct SET_MAC command.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace the pmac entry (old one is deleted first;
		 * the delete result is intentionally not checked)
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC for ndo_get_vf_config */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1446
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001447static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301448 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001449{
1450 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001451 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001452
Sathya Perla11ac75e2011-12-13 00:58:50 +00001453 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001454 return -EPERM;
1455
Sathya Perla11ac75e2011-12-13 00:58:50 +00001456 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001457 return -EINVAL;
1458
1459 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001460 vi->max_tx_rate = vf_cfg->tx_rate;
1461 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001462 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1463 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001464 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301465 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001466
1467 return 0;
1468}
1469
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for a VF.
 * A non-zero vlan/qos programs the tag via host-switch config; zero for
 * both resets transparent tagging.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Fold the priority bits into the tag */
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* Skip the FW round-trip if the tag is already programmed */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* Cache the programmed tag for ndo_get_vf_config */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1504
/* ndo_set_vf_rate handler: set the TX rate limit for a VF.
 * min_tx_rate is not supported by this HW; only max_tx_rate is honoured,
 * and a max_tx_rate of 0 disables rate limiting (skips link validation).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* 0 == no limit: program it without querying the link */
	if (!max_tx_rate)
		goto config_qos;

	/* Rate limits are validated against the current link speed */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the programmed rate for ndo_get_vf_config */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301566
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301567static int be_set_vf_link_state(struct net_device *netdev, int vf,
1568 int link_state)
1569{
1570 struct be_adapter *adapter = netdev_priv(netdev);
1571 int status;
1572
1573 if (!sriov_enabled(adapter))
1574 return -EPERM;
1575
1576 if (vf >= adapter->num_vfs)
1577 return -EINVAL;
1578
1579 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301580 if (status) {
1581 dev_err(&adapter->pdev->dev,
1582 "Link state change on VF %d failed: %#x\n", vf, status);
1583 return be_cmd_status(status);
1584 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301585
Kalesh APabccf232014-07-17 16:20:24 +05301586 adapter->vf_cfg[vf].plink_tracking = link_state;
1587
1588 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301589}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001590
Sathya Perla2632baf2013-10-01 16:00:00 +05301591static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1592 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001593{
Sathya Perla2632baf2013-10-01 16:00:00 +05301594 aic->rx_pkts_prev = rx_pkts;
1595 aic->tx_reqs_prev = tx_pkts;
1596 aic->jiffies = now;
1597}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001598
/* Adaptive interrupt coalescing: recompute the event-queue delay (EQD)
 * for every EQ from the RX+TX packet rate since the last run, and push
 * all changed values to the FW in a single modify-EQD command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: use the statically configured EQD */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the per-queue counters under their u64_stats seqlock */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined RX+TX packets-per-second over the interval */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		/* Clamp to the configured AIC range */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Batch only the EQs whose delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1664
Sathya Perla3abcded2010-10-03 22:12:27 -07001665static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301666 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001667{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001668 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001669
Sathya Perlaab1594e2011-07-25 19:10:15 +00001670 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001671 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001672 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001673 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001674 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001675 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001676 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001677 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001678 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679}
1680
Sathya Perla2e588f82011-03-11 02:49:26 +00001681static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001682{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001683 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301684 * Also ignore ipcksm for ipv6 pkts
1685 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001686 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301687 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001688}
1689
/* Pop the RX page-info at the queue tail and make its data CPU-visible.
 * A DMA-mapped page is carved into rx_frag_size fragments: the full page
 * is unmapped only on its last fragment; earlier fragments are just
 * synced for CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment of the page: tear down the whole mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Only this fragment's range needs to be CPU-visible */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1715
1716/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001717static void be_rx_compl_discard(struct be_rx_obj *rxo,
1718 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001721 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001723 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301724 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001725 put_page(page_info->page);
1726 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727 }
1728}
1729
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp. Tiny frames are copied entirely into the linear
 * area; larger frames keep their data in page fragments, coalescing
 * fragments that share a physical page into one skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page ownership has moved to the skb (or the page was freed) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1804
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: drop the frame and reclaim its buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* The HW tunneled flag doubles as the csum level for encap pkts */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1840
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb from the completion's page fragments and
 * hands it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame, reclaim its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for frames whose csum HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* The HW tunneled flag doubles as the csum level for encap pkts */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1898
/* Decode a v1 RX completion descriptor into the SW rxcp struct.
 * Field order mirrors the HW descriptor layout; each GET_RX_COMPL_V1_BITS
 * extracts one bit-field from the completion entry.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		/* VLAN fields are meaningful only when the vtp bit is set */
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001921
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001922static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1923 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001924{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301925 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1926 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1927 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1928 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1929 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1930 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1931 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1932 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1933 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1934 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1935 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001936 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301937 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1938 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001939 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301940 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1941 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001942}
1943
/* Fetch the next valid RX completion from the RX CQ, parse it into
 * rxo->rxcp and advance the CQ tail. Returns NULL when no valid
 * completion is pending. The returned rxcp is a per-rxo scratch area,
 * overwritten by the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native adapters use the v1 completion layout */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* IP fragments: l4_csum is cleared — presumably HW cannot validate
	 * the L4 checksum of a fragment; confirm against HW spec
	 */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* On non-Lancer chips the vlan tag arrives byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the vid was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1988
Eric Dumazet1829b082011-03-01 05:48:12 +00001989static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001990{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001991 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001992
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001993 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001994 gfp |= __GFP_COMP;
1995 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996}
1997
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Fill until frags_needed is met or we hit a slot whose page is
	 * still owned by HW (page_info->page != NULL)
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page"; it is DMA-mapped once and
			 * then carved into rx_frag_size fragments
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current page: take an extra
			 * page reference for it
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Write the frag's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* last_frag carries the full-page unmap address */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 buffers at a time */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2080
/* Fetch the next valid TX completion from the TX CQ, parse it into
 * txo->txcp and advance the CQ tail. Returns NULL when no valid
 * completion is pending.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry is not parsed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2101
/* Walk the TX queue from the current tail up to and including
 * @last_index, unmapping each wrb and freeing the completed skbs.
 * Returns the number of wrbs consumed so the caller can adjust
 * txq->used. The tail of @txo's queue is advanced as a side effect.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL sent_skbs[] entry marks the hdr wrb of a new
		 * request; the slots for its frag wrbs are NULL
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			/* The next wrb holds the header mapping; unmap it
			 * only if the skb has linear (header) data
			 */
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request; presumably the walk always
	 * starts on a hdr wrb, so skb is non-NULL here — verify callers
	 */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2135
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume EQ entries until a zero (invalid) entry is seen; each
	 * consumed entry is cleared so it is not counted twice
	 */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the read of the entry before clearing it */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2155
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002156/* Leaves the EQ is disarmed state */
2157static void be_eq_clean(struct be_eq_obj *eqo)
2158{
2159 int num = events_get(eqo);
2160
2161 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2162}
2163
/* Drain the RX CQ and release all RX buffers still posted to HW,
 * resetting the RXQ indices. Called only on the teardown path.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2213
/* Drain all TX completions from HW, then reclaim any wrbs that were
 * queued but never notified to HW. Called only on the teardown path,
 * after TX has been stopped.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			/* Process every completion currently in the CQ */
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2278
/* Destroy all event queues: drain and destroy each created EQ, remove
 * its NAPI instance, and free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume any leftover events before destroy */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Queue memory is freed even if the EQ was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2294
/* Create the event queues (one per interrupt vector, capped by the
 * configured queue count) along with their NAPI contexts and adaptive
 * interrupt-coalescing (AIC) state.
 * Returns 0 on success or a negative status; partially created queues
 * are left for be_evt_queues_destroy() to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2327
Sathya Perla5fb379e2009-06-18 00:02:59 +00002328static void be_mcc_queues_destroy(struct be_adapter *adapter)
2329{
2330 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002331
Sathya Perla8788fdc2009-07-27 22:52:03 +00002332 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002333 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002334 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002335 be_queue_free(adapter, q);
2336
Sathya Perla8788fdc2009-07-27 22:52:03 +00002337 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002338 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002339 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002340 be_queue_free(adapter, q);
2341}
2342
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* The CQ must exist before the MCC WRB queue can point at it */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2375
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002376static void be_tx_queues_destroy(struct be_adapter *adapter)
2377{
2378 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002379 struct be_tx_obj *txo;
2380 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002381
Sathya Perla3c8def92011-06-12 20:01:58 +00002382 for_all_tx_queues(adapter, txo, i) {
2383 q = &txo->q;
2384 if (q->created)
2385 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2386 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002387
Sathya Perla3c8def92011-06-12 20:01:58 +00002388 q = &txo->cq;
2389 if (q->created)
2390 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2391 be_queue_free(adapter, q);
2392 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002393}
2394
/* Create the TX queues and their completion queues, one pair per TX
 * ring (capped by the number of EQs and the HW TXQ limit).
 * Returns 0 on success or a negative status; partially created queues
 * are left for be_tx_queues_destroy() to clean up.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2435
2436static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002437{
2438 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002439 struct be_rx_obj *rxo;
2440 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002441
Sathya Perla3abcded2010-10-03 22:12:27 -07002442 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002443 q = &rxo->cq;
2444 if (q->created)
2445 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2446 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002447 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002448}
2449
/* Decide the number of RX rings (RSS rings plus an optional default
 * RXQ) and create a completion queue for each, distributing them over
 * the available EQs.
 * Returns 0 on success or a negative status; partially created queues
 * are left for be_rx_cqs_destroy() to clean up.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs may share EQs when there are fewer EQs than RXQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2491
/* Legacy INTx interrupt handler: count pending events, hand processing
 * off to NAPI, and decide whether the interrupt was "ours".
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2523
/* MSI-x interrupt handler: one vector per event queue; all event
 * processing is deferred to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	/* Same disarmed notify as be_intx (zero events counted here);
	 * presumably the EQ is re-armed when NAPI polling completes --
	 * confirm in be_poll
	 */
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2532
Sathya Perla2e588f82011-03-11 02:49:26 +00002533static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002534{
Somnath Koture38b1702013-05-29 22:55:56 +00002535 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002536}
2537
/* Drain up to @budget RX completions from @rxo's completion queue and pass
 * the frames up the stack (via GRO when eligible and not busy-polling).
 * Invalid/flush completions are discarded. Returns the number of
 * completions processed; also notifies the CQ and replenishes RX frags
 * unless the queue is in post_starved state (be_worker handles that case).
 * @polling is NAPI_POLLING or BUSY_POLLING.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;	/* total rx frags used; sizes the refill */

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Stats are updated even for discarded completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2597
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302598static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302599{
2600 switch (status) {
2601 case BE_TX_COMP_HDR_PARSE_ERR:
2602 tx_stats(txo)->tx_hdr_parse_err++;
2603 break;
2604 case BE_TX_COMP_NDMA_ERR:
2605 tx_stats(txo)->tx_dma_err++;
2606 break;
2607 case BE_TX_COMP_ACL_ERR:
2608 tx_stats(txo)->tx_spoof_check_err++;
2609 break;
2610 }
2611}
2612
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302613static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302614{
2615 switch (status) {
2616 case LANCER_TX_COMP_LSO_ERR:
2617 tx_stats(txo)->tx_tso_err++;
2618 break;
2619 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2620 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2621 tx_stats(txo)->tx_spoof_check_err++;
2622 break;
2623 case LANCER_TX_COMP_QINQ_ERR:
2624 tx_stats(txo)->tx_qinq_err++;
2625 break;
2626 case LANCER_TX_COMP_PARITY_ERR:
2627 tx_stats(txo)->tx_internal_parity_err++;
2628 break;
2629 case LANCER_TX_COMP_DMA_ERR:
2630 tx_stats(txo)->tx_dma_err++;
2631 break;
2632 }
2633}
2634
/* Reap all available TX completions on @txo (the TXQ with netdev queue
 * index @idx): free the completed wrbs/skbs, record any error status,
 * notify the CQ, and wake the netdev subqueue if it was flow-stopped and
 * enough wrb space has been reclaimed.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Route error accounting to the chip-specific stat helper */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002669
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll vs NAPI arbitration.
 *
 * An EQ's RX queues may be processed either from the NAPI softirq path
 * (be_poll) or from the socket busy-poll path (be_busy_poll), but never
 * both at once. eqo->lock + eqo->state implement the arbitration:
 * whichever side finds the EQ locked records a *_YIELD flag and backs off.
 */

/* Try to take the EQ for NAPI processing. Returns false (and marks
 * NAPI_YIELD) if busy-poll currently owns it. Called with BH disabled.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing; resets state to IDLE. */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take the EQ for busy-poll processing. Returns false (and marks
 * POLL_YIELD) if NAPI currently owns it. Runs in process context, so BH
 * must be disabled here (spin_lock_bh).
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-poll processing; resets state to IDLE. */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the arbitration lock/state; called when the EQ is enabled
 * (see be_open()).
 */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Quiesce busy-poll on this EQ: spin until the NAPI lock is acquired,
 * which guarantees no busy-poller is inside be_process_rx(). Called from
 * be_close().
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support there is no contention: NAPI always wins
 * and the busy-poll hooks are no-ops.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2769
/* NAPI poll handler for one event queue: reaps TX completions on all TXQs
 * bound to this EQ, processes RX (unless a busy-poller owns the EQ, in
 * which case the full budget is reported to force a re-poll), services the
 * MCC queue on the MCC EQ, and finally notifies/arms the EQ depending on
 * whether the budget was exhausted. Returns the RX work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* Count (and clear) EQ entries up front; reported to hw below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ; claim full budget so NAPI re-polls */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* Done: leave polling mode and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2809
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Socket busy-poll (low-latency) handler: process a small batch of RX
 * completions directly from process context. Bails out with LL_FLUSH_BUSY
 * if NAPI currently owns the EQ; otherwise stops at the first RX queue
 * that yields any work and returns the amount processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int qidx, rx_work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, qidx) {
		rx_work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (rx_work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return rx_work;
}
#endif
2831
/* Poll the adapter's error registers and flag hardware errors.
 * Lancer: reads the SLIPORT status/error registers; a firmware-reset
 * signature is logged as info, anything else as an error, and hw_error is
 * set. BE2/BE3/Skyhawk: reads the masked UE (Unrecoverable Error) status
 * words from config space; hw_error is set only on Skyhawk because BEx
 * can report spurious UEs (see comment below). On any detected error the
 * carrier is turned off. No-op if an error was already latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			error_detected = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-out bits are not real errors; drop them */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2907
Sathya Perla8d56ff12009-11-22 22:02:26 +00002908static void be_msix_disable(struct be_adapter *adapter)
2909{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002910 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002911 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002912 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302913 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002914 }
2915}
2916
/* Allocate MSI-X vectors for the NIC (and RoCE, when supported) and split
 * the granted range between them. Returns 0 on success. On failure,
 * returns 0 for a PF (the driver falls back to INTx) but the negative
 * error for a VF, where INTx is unsupported and probe must fail.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested (>= MIN_MSIX_VECTORS) */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors, the NIC the remainder */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2960
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002961static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302962 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002963{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302964 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002965}
2966
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs already acquired (walking the eq_obj array
 * backwards from the last successful request), disables MSI-X entirely,
 * and returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: i currently indexes the EQ that failed; free the rest */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2990
2991static int be_irq_register(struct be_adapter *adapter)
2992{
2993 struct net_device *netdev = adapter->netdev;
2994 int status;
2995
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002996 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002997 status = be_msix_register(adapter);
2998 if (status == 0)
2999 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003000 /* INTx is not supported for VF */
3001 if (!be_physfn(adapter))
3002 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003003 }
3004
Sathya Perlae49cc342012-11-27 19:50:02 +00003005 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003006 netdev->irq = adapter->pdev->irq;
3007 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003008 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003009 if (status) {
3010 dev_err(&adapter->pdev->dev,
3011 "INTx request IRQ failed - err %d\n", status);
3012 return status;
3013 }
3014done:
3015 adapter->isr_registered = true;
3016 return 0;
3017}
3018
3019static void be_irq_unregister(struct be_adapter *adapter)
3020{
3021 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003022 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003023 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003024
3025 if (!adapter->isr_registered)
3026 return;
3027
3028 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003029 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003030 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003031 goto done;
3032 }
3033
3034 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003035 for_all_evt_queues(adapter, eqo, i)
3036 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003037
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003038done:
3039 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003040}
3041
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003042static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003043{
3044 struct be_queue_info *q;
3045 struct be_rx_obj *rxo;
3046 int i;
3047
3048 for_all_rx_queues(adapter, rxo, i) {
3049 q = &rxo->q;
3050 if (q->created) {
3051 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003052 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003053 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003054 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003055 }
3056}
3057
/* ndo_stop: quiesce and tear down the data path in a strict order —
 * RoCE close, NAPI/busy-poll disable, MCC async disable, TX drain,
 * RX queue destroy, per-EQ IRQ synchronize + EQ clean, then IRQ free.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	/* Ensure no ISR is still running for any EQ before cleaning it */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3103
/* Allocate and create all RX queues, program the RSS indirection table
 * and hash key (or disable RSS when only the default RXQ exists), and
 * post the initial batch of receive buffers. Returns 0 or a negative
 * error; on RSS-config failure rss_flags is reset to RSS_ENABLE_NONE.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	/* Allocate ring memory for every RXQ first */
	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the non-RSS default RXQ when needed */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queues until all RSS_INDIR_TABLE_LEN slots are assigned.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not supported on BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Keep a copy of the programmed key for ethtool reporting */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3170
/* ndo_open: bring up the data path — create RX queues, register IRQs,
 * arm all RX/TX completion queues, enable async MCC events, enable
 * NAPI/busy-poll and arm every EQ, report link state, start the TX
 * queues, and open the RoCE side. Any failure rolls everything back via
 * be_close(). Returns 0 or -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn already-registered VxLAN ports for RX offload setup */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3220
/* Enable or disable Wake-on-LAN (magic packet). When enabling, programs
 * the PM control bits in config space, arms magic-WoL on the current MAC,
 * and enables PCI wake for D3hot/D3cold; when disabling, arms WoL with a
 * zero MAC (which the fw treats as disable — TODO confirm against fw spec)
 * and clears the PCI wake states. A DMA buffer is allocated for the fw
 * command and freed on every exit path. Returns 0 or a negative error.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	eth_zero_addr(mac);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			/* Free the command buffer on this early-out path */
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3260
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003261static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3262{
3263 u32 addr;
3264
3265 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3266
3267 mac[5] = (u8)(addr & 0xFF);
3268 mac[4] = (u8)((addr >> 8) & 0xFF);
3269 mac[3] = (u8)((addr >> 16) & 0xFF);
3270 /* Use the OUI from the current MAC address */
3271 memcpy(mac, adapter->netdev->dev_addr, 3);
3272}
3273
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003274/*
3275 * Generate a seed MAC address from the PF MAC Address using jhash.
3276 * MAC Address for VFs are assigned incrementally starting from the seed.
3277 * These addresses are programmed in the ASIC by the PF and the VF driver
3278 * queries for the MAC address during its probe.
3279 */
Sathya Perla4c876612013-02-03 20:30:11 +00003280static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003281{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003282 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003283 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003284 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003285 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003286
3287 be_vf_eth_addr_generate(adapter, mac);
3288
Sathya Perla11ac75e2011-12-13 00:58:50 +00003289 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303290 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003291 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003292 vf_cfg->if_handle,
3293 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303294 else
3295 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3296 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003297
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003298 if (status)
3299 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303300 "Mac address assignment failed for VF %d\n",
3301 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003302 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003303 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003304
3305 mac[5] += 1;
3306 }
3307 return status;
3308}
3309
Sathya Perla4c876612013-02-03 20:30:11 +00003310static int be_vfs_mac_query(struct be_adapter *adapter)
3311{
3312 int status, vf;
3313 u8 mac[ETH_ALEN];
3314 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003315
3316 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303317 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3318 mac, vf_cfg->if_handle,
3319 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003320 if (status)
3321 return status;
3322 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3323 }
3324 return 0;
3325}
3326
/* Tear down SR-IOV state: disable SR-IOV, delete each VF's MAC and iface,
 * and release the per-VF config array.  If any VF is still assigned to a
 * VM, the VFs are deliberately left enabled and only the host-side
 * bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	/* Disable SR-IOV before destroying the VF ifaces/MACs below */
	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips use pmac-table entries; newer chips use SET_MAC
		 * (mirrors how the MACs were programmed at setup time)
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3355
/* Destroy all adapter queues in the reverse order of their creation in
 * be_setup_queues(): MCC first, then RX CQs, TX queues and finally the
 * event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3363
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303364static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003365{
Sathya Perla191eb752012-02-23 18:50:13 +00003366 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3367 cancel_delayed_work_sync(&adapter->work);
3368 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3369 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303370}
3371
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003372static void be_cancel_err_detection(struct be_adapter *adapter)
3373{
3374 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3375 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3376 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3377 }
3378}
3379
Somnath Koturb05004a2013-12-05 12:08:16 +05303380static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303381{
Somnath Koturb05004a2013-12-05 12:08:16 +05303382 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003383 be_cmd_pmac_del(adapter, adapter->if_handle,
3384 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303385 kfree(adapter->pmac_id);
3386 adapter->pmac_id = NULL;
3387 }
3388}
3389
#ifdef CONFIG_BE2NET_VXLAN
/* Undo all VXLAN offload configuration: revert the iface from
 * tunnel mode, clear the FW's VXLAN UDP port, and withdraw the
 * tunnel-segmentation feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* FW commands first, while the driver state still reflects
	 * the active offload configuration
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Stop advertising UDP-tunnel segmentation to the stack */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303410
/* Compute the number of queue pairs each VF should get when @num_vfs VFs
 * are enabled, based on the PF-pool RSS queue count.  Returns at least 1.
 */
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and its VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		/* NOTE(review): assumes res.max_rss_qs > 8 whenever this
		 * branch is taken; otherwise the u16 subtraction wraps —
		 * TODO confirm FW always reports enough RSS queues here.
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
3438
/* Full teardown of adapter setup state (inverse of the setup path):
 * stop the worker, clear VFs, restore the FW's even SR-IOV resource
 * split, drop VXLAN offloads, MACs, the iface, all queues and MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	/* Stop the periodic worker before dismantling what it touches */
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3474
Kalesh AP0700d812015-01-20 03:51:43 -05003475static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3476 u32 cap_flags, u32 vf)
3477{
3478 u32 en_flags;
3479 int status;
3480
3481 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3482 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003483 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003484
3485 en_flags &= cap_flags;
3486
3487 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3488 if_handle, vf);
3489
3490 return status;
3491}
3492
/* Create one FW interface per VF.  On non-BE3 chips the per-VF capability
 * flags are read from the FW profile; otherwise a minimal default set is
 * used.  Returns 0 or the first failure status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			/* best-effort: fall back to the default flags when
			 * the profile query fails
			 */
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3521
/* Allocate the per-VF config array and mark every handle invalid (-1)
 * until the VF ifaces and MACs are actually created.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3538
/* Bring up SR-IOV VFs.  If VFs were already enabled (e.g. by a previous
 * driver instance) their iface handles and MACs are queried from the FW;
 * otherwise ifaces are created and seed MACs programmed.  Each VF is then
 * granted filter-management privilege, QoS and link-state defaults, and
 * finally SR-IOV is enabled on the PCI device.  On any failure the whole
 * VF state is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: recover their handles/MACs from FW */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3613
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303614/* Converting function_mode bits on BE3 to SH mc_type enums */
3615
3616static u8 be_convert_mc_type(u32 function_mode)
3617{
Suresh Reddy66064db2014-06-23 16:41:29 +05303618 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303619 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303620 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303621 return FLEX10;
3622 else if (function_mode & VNIC_MODE)
3623 return vNIC2;
3624 else if (function_mode & UMC_ENABLED)
3625 return UMC;
3626 else
3627 return MC_NONE;
3628}
3629
/* On BE2/BE3 FW does not suggest the supported limits; fill @res with
 * driver-side defaults derived from the chip type, SR-IOV state,
 * multi-channel mode and the function capability bits.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* one extra RX queue for non-RSS (default) traffic */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx FW has no RSS default-queue support */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3699
Sathya Perla30128032011-11-10 19:17:57 +00003700static void be_setup_init(struct be_adapter *adapter)
3701{
3702 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003703 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003704 adapter->if_handle = -1;
3705 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003706 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003707 if (be_physfn(adapter))
3708 adapter->cmd_privileges = MAX_PRIVILEGES;
3709 else
3710 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003711}
3712
/* Query the SR-IOV PF-pool resources from FW, record them in
 * adapter->pool_res, and derive adapter->num_vfs from the num_vfs module
 * parameter (or from the already-enabled VF count, which takes
 * precedence).  Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* fall back to the PCI SR-IOV capability's TotalVFs */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled (e.g. by a prior driver instance):
		 * the existing count wins over the module parameter
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3757
/* Populate adapter->res with the per-function resource limits: computed
 * defaults on BE2/BE3, FW-reported limits on Lancer/Skyhawk.  Also
 * decides whether a separate non-RSS default RXQ is needed.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3805
/* Read the SR-IOV configuration and, when possible, redistribute the
 * PF-pool resources across just the requested number of VFs so each VF
 * gets a larger share.  Errors are logged but not propagated; SR-IOV is
 * simply left unconfigured on failure.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 num_vf_qs;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3836
/* Gather the adapter configuration from FW: controller attributes, FW
 * config, log level (BEx), WOL capability, port name, active profile,
 * SR-IOV settings and resource limits; then allocate the pmac-id table
 * and clamp the configured queue count.  Returns 0 or a FW/alloc error.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (BEx_chip(adapter)) {
		/* mirror the FW's log level in the driver's msg_enable */
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		/* best-effort: informational only */
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* SR-IOV configuration is not available on BE2 or on VFs */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3884
Sathya Perla95046b92013-07-23 15:25:02 +05303885static int be_mac_setup(struct be_adapter *adapter)
3886{
3887 u8 mac[ETH_ALEN];
3888 int status;
3889
3890 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3891 status = be_cmd_get_perm_mac(adapter, mac);
3892 if (status)
3893 return status;
3894
3895 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3896 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3897 } else {
3898 /* Maybe the HW was reset; dev_addr must be re-programmed */
3899 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3900 }
3901
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003902 /* For BE3-R VFs, the PF programs the initial MAC address */
3903 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3904 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3905 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303906 return 0;
3907}
3908
/* Arm the periodic (1s) worker and record that it is scheduled so
 * be_cancel_worker() knows to cancel it.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3914
/* Arm the periodic (1s) error-detection worker and record that it is
 * scheduled so be_cancel_err_detection() knows to cancel it.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
3921
/* Create all adapter queues in dependency order (EQs first, then TX, RX
 * CQs and MCC) and publish the real queue counts to the net stack.
 * On any failure, logs once and returns the failing status; the caller
 * is responsible for tearing down whatever was created.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3956
/* Re-create the adapter queues (e.g. after a channel-count change):
 * quiesce the device, destroy all queues, optionally re-program MSI-X,
 * rebuild the queues and resume.  Returns 0 or the first failure status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3992
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003993static inline int fw_major_num(const char *fw_ver)
3994{
3995 int fw_major = 0, i;
3996
3997 i = sscanf(fw_ver, "%d.", &fw_major);
3998 if (i != 1)
3999 return 0;
4000
4001 return fw_major;
4002}
4003
Sathya Perlaf962f842015-02-23 04:20:16 -05004004/* If any VFs are already enabled don't FLR the PF */
4005static bool be_reset_required(struct be_adapter *adapter)
4006{
4007 return pci_num_vf(adapter->pdev) ? false : true;
4008}
4009
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	/* Poll until the FW reports it can accept commands */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FLR is skipped when VFs are enabled; see be_reset_required() */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4041
Sathya Perla5fb379e2009-06-18 00:02:59 +00004042static int be_setup(struct be_adapter *adapter)
4043{
Sathya Perla39f1d942012-05-08 19:41:24 +00004044 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004045 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004046
Sathya Perlaf962f842015-02-23 04:20:16 -05004047 status = be_func_init(adapter);
4048 if (status)
4049 return status;
4050
Sathya Perla30128032011-11-10 19:17:57 +00004051 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004052
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004053 if (!lancer_chip(adapter))
4054 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004055
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004056 status = be_get_config(adapter);
4057 if (status)
4058 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004059
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004060 status = be_msix_enable(adapter);
4061 if (status)
4062 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004063
Kalesh AP0700d812015-01-20 03:51:43 -05004064 status = be_if_create(adapter, &adapter->if_handle,
4065 be_if_cap_flags(adapter), 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004066 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004067 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004068
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304069 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4070 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304071 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304072 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004073 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004074 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004075
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004076 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004077
Sathya Perla95046b92013-07-23 15:25:02 +05304078 status = be_mac_setup(adapter);
4079 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004080 goto err;
4081
Kalesh APe97e3cd2014-07-17 16:20:26 +05304082 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304083 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004084
Somnath Koture9e2a902013-10-24 14:37:53 +05304085 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304086 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304087 adapter->fw_ver);
4088 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4089 }
4090
Sathya Perla1d1e9a42012-06-05 19:37:17 +00004091 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00004092 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004093
4094 be_set_rx_mode(adapter->netdev);
4095
Kalesh AP00d594c2015-01-20 03:51:44 -05004096 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4097 adapter->rx_fc);
4098 if (status)
4099 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4100 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004101
Kalesh AP00d594c2015-01-20 03:51:44 -05004102 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4103 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004104
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304105 if (be_physfn(adapter))
4106 be_cmd_set_logical_link_config(adapter,
4107 IFLA_VF_LINK_STATE_AUTO, 0);
4108
Vasundhara Volambec84e62014-06-30 13:01:32 +05304109 if (adapter->num_vfs)
4110 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004111
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004112 status = be_cmd_get_phy_info(adapter);
4113 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004114 adapter->phy.fc_autoneg = 1;
4115
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304116 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304117 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004118 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004119err:
4120 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004121 return status;
4122}
4123
Ivan Vecera66268732011-12-08 01:31:21 +00004124#ifdef CONFIG_NET_POLL_CONTROLLER
4125static void be_netpoll(struct net_device *netdev)
4126{
4127 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004128 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004129 int i;
4130
Sathya Perlae49cc342012-11-27 19:50:02 +00004131 for_all_evt_queues(adapter, eqo, i) {
4132 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
4133 napi_schedule(&eqo->napi);
4134 }
Ivan Vecera66268732011-12-08 01:31:21 +00004135}
4136#endif
4137
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304138static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004139
Sathya Perla306f1342011-08-02 19:57:45 +00004140static bool phy_flashing_required(struct be_adapter *adapter)
4141{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004142 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004143 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004144}
4145
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004146static bool is_comp_in_ufi(struct be_adapter *adapter,
4147 struct flash_section_info *fsec, int type)
4148{
4149 int i = 0, img_type = 0;
4150 struct flash_section_info_g2 *fsec_g2 = NULL;
4151
Sathya Perlaca34fe32012-11-06 17:48:56 +00004152 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004153 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4154
4155 for (i = 0; i < MAX_FLASH_COMP; i++) {
4156 if (fsec_g2)
4157 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4158 else
4159 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4160
4161 if (img_type == type)
4162 return true;
4163 }
4164 return false;
4165
4166}
4167
Jingoo Han4188e7d2013-08-05 18:02:02 +09004168static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304169 int header_size,
4170 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004171{
4172 struct flash_section_info *fsec = NULL;
4173 const u8 *p = fw->data;
4174
4175 p += header_size;
4176 while (p < (fw->data + fw->size)) {
4177 fsec = (struct flash_section_info *)p;
4178 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4179 return fsec;
4180 p += 32;
4181 }
4182 return NULL;
4183}
4184
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304185static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4186 u32 img_offset, u32 img_size, int hdr_size,
4187 u16 img_optype, bool *crc_match)
4188{
4189 u32 crc_offset;
4190 int status;
4191 u8 crc[4];
4192
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004193 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4194 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304195 if (status)
4196 return status;
4197
4198 crc_offset = hdr_size + img_offset + img_size - 4;
4199
4200 /* Skip flashing, if crc of flashed region matches */
4201 if (!memcmp(crc, p + crc_offset, 4))
4202 *crc_match = true;
4203 else
4204 *crc_match = false;
4205
4206 return status;
4207}
4208
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004209static int be_flash(struct be_adapter *adapter, const u8 *img,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004210 struct be_dma_mem *flash_cmd, int optype, int img_size,
4211 u32 img_offset)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004212{
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004213 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004214 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304215 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004216
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004217 while (total_bytes) {
4218 num_bytes = min_t(u32, 32*1024, total_bytes);
4219
4220 total_bytes -= num_bytes;
4221
4222 if (!total_bytes) {
4223 if (optype == OPTYPE_PHY_FW)
4224 flash_op = FLASHROM_OPER_PHY_FLASH;
4225 else
4226 flash_op = FLASHROM_OPER_FLASH;
4227 } else {
4228 if (optype == OPTYPE_PHY_FW)
4229 flash_op = FLASHROM_OPER_PHY_SAVE;
4230 else
4231 flash_op = FLASHROM_OPER_SAVE;
4232 }
4233
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004234 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004235 img += num_bytes;
4236 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004237 flash_op, img_offset +
4238 bytes_sent, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05304239 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304240 optype == OPTYPE_PHY_FW)
4241 break;
4242 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004243 return status;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004244
4245 bytes_sent += num_bytes;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004246 }
4247 return 0;
4248}
4249
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004250/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004251static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304252 const struct firmware *fw,
4253 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004254{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004255 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304256 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004257 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304258 int status, i, filehdr_size, num_comp;
4259 const struct flash_comp *pflashcomp;
4260 bool crc_match;
4261 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004262
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004263 struct flash_comp gen3_flash_types[] = {
4264 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4265 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4266 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4267 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4268 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4269 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4270 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4271 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4272 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4273 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4274 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4275 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4276 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4277 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4278 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4279 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4280 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4281 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4282 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4283 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004284 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004285
4286 struct flash_comp gen2_flash_types[] = {
4287 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4288 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4289 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4290 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4291 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4292 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4293 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4294 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4295 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4296 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4297 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4298 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4299 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4300 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4301 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4302 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004303 };
4304
Sathya Perlaca34fe32012-11-06 17:48:56 +00004305 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004306 pflashcomp = gen3_flash_types;
4307 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004308 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004309 } else {
4310 pflashcomp = gen2_flash_types;
4311 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004312 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004313 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004314 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004315
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004316 /* Get flash section info*/
4317 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4318 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304319 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004320 return -1;
4321 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004322 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004323 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004324 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004325
4326 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4327 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4328 continue;
4329
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004330 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4331 !phy_flashing_required(adapter))
4332 continue;
4333
4334 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304335 status = be_check_flash_crc(adapter, fw->data,
4336 pflashcomp[i].offset,
4337 pflashcomp[i].size,
4338 filehdr_size +
4339 img_hdrs_size,
4340 OPTYPE_REDBOOT, &crc_match);
4341 if (status) {
4342 dev_err(dev,
4343 "Could not get CRC for 0x%x region\n",
4344 pflashcomp[i].optype);
4345 continue;
4346 }
4347
4348 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004349 continue;
4350 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004351
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304352 p = fw->data + filehdr_size + pflashcomp[i].offset +
4353 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004354 if (p + pflashcomp[i].size > fw->data + fw->size)
4355 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004356
4357 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004358 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004359 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304360 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004361 pflashcomp[i].img_type);
4362 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004363 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004364 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004365 return 0;
4366}
4367
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304368static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4369{
4370 u32 img_type = le32_to_cpu(fsec_entry.type);
4371 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4372
4373 if (img_optype != 0xFFFF)
4374 return img_optype;
4375
4376 switch (img_type) {
4377 case IMAGE_FIRMWARE_iSCSI:
4378 img_optype = OPTYPE_ISCSI_ACTIVE;
4379 break;
4380 case IMAGE_BOOT_CODE:
4381 img_optype = OPTYPE_REDBOOT;
4382 break;
4383 case IMAGE_OPTION_ROM_ISCSI:
4384 img_optype = OPTYPE_BIOS;
4385 break;
4386 case IMAGE_OPTION_ROM_PXE:
4387 img_optype = OPTYPE_PXE_BIOS;
4388 break;
4389 case IMAGE_OPTION_ROM_FCoE:
4390 img_optype = OPTYPE_FCOE_BIOS;
4391 break;
4392 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4393 img_optype = OPTYPE_ISCSI_BACKUP;
4394 break;
4395 case IMAGE_NCSI:
4396 img_optype = OPTYPE_NCSI_FW;
4397 break;
4398 case IMAGE_FLASHISM_JUMPVECTOR:
4399 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4400 break;
4401 case IMAGE_FIRMWARE_PHY:
4402 img_optype = OPTYPE_SH_PHY_FW;
4403 break;
4404 case IMAGE_REDBOOT_DIR:
4405 img_optype = OPTYPE_REDBOOT_DIR;
4406 break;
4407 case IMAGE_REDBOOT_CONFIG:
4408 img_optype = OPTYPE_REDBOOT_CONFIG;
4409 break;
4410 case IMAGE_UFI_DIR:
4411 img_optype = OPTYPE_UFI_DIR;
4412 break;
4413 default:
4414 break;
4415 }
4416
4417 return img_optype;
4418}
4419
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004420static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304421 const struct firmware *fw,
4422 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004423{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004424 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004425 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304426 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004427 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304428 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004429 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304430 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304431 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004432
4433 filehdr_size = sizeof(struct flash_file_hdr_g3);
4434 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4435 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304436 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304437 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004438 }
4439
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004440retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004441 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4442 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4443 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304444 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4445 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4446 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004447
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304448 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004449 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004450
4451 if (flash_offset_support)
4452 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4453 else
4454 flash_optype = img_optype;
4455
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304456 /* Don't bother verifying CRC if an old FW image is being
4457 * flashed
4458 */
4459 if (old_fw_img)
4460 goto flash;
4461
4462 status = be_check_flash_crc(adapter, fw->data, img_offset,
4463 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004464 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304465 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304466 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4467 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004468 /* The current FW image on the card does not support
4469 * OFFSET based flashing. Retry using older mechanism
4470 * of OPTYPE based flashing
4471 */
4472 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4473 flash_offset_support = false;
4474 goto retry_flash;
4475 }
4476
4477 /* The current FW image on the card does not recognize
4478 * the new FLASH op_type. The FW download is partially
4479 * complete. Reboot the server now to enable FW image
4480 * to recognize the new FLASH op_type. To complete the
4481 * remaining process, download the same FW again after
4482 * the reboot.
4483 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304484 dev_err(dev, "Flash incomplete. Reset the server\n");
4485 dev_err(dev, "Download FW image again after reset\n");
4486 return -EAGAIN;
4487 } else if (status) {
4488 dev_err(dev, "Could not get CRC for 0x%x region\n",
4489 img_optype);
4490 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004491 }
4492
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304493 if (crc_match)
4494 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004495
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304496flash:
4497 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004498 if (p + img_size > fw->data + fw->size)
4499 return -1;
4500
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004501 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4502 img_offset);
4503
4504 /* The current FW image on the card does not support OFFSET
4505 * based flashing. Retry using older mechanism of OPTYPE based
4506 * flashing
4507 */
4508 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4509 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4510 flash_offset_support = false;
4511 goto retry_flash;
4512 }
4513
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304514 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4515 * UFI_DIR region
4516 */
Kalesh AP4c600052014-05-30 19:06:26 +05304517 if (old_fw_img &&
4518 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4519 (img_optype == OPTYPE_UFI_DIR &&
4520 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304521 continue;
4522 } else if (status) {
4523 dev_err(dev, "Flashing section type 0x%x failed\n",
4524 img_type);
4525 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004526 }
4527 }
4528 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004529}
4530
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004531static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304532 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004533{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004534#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4535#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304536 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004537 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004538 const u8 *data_ptr = NULL;
4539 u8 *dest_image_ptr = NULL;
4540 size_t image_size = 0;
4541 u32 chunk_size = 0;
4542 u32 data_written = 0;
4543 u32 offset = 0;
4544 int status = 0;
4545 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004546 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004547
4548 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304549 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304550 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004551 }
4552
4553 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4554 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304555 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004556 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304557 if (!flash_cmd.va)
4558 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004559
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004560 dest_image_ptr = flash_cmd.va +
4561 sizeof(struct lancer_cmd_req_write_object);
4562 image_size = fw->size;
4563 data_ptr = fw->data;
4564
4565 while (image_size) {
4566 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4567
4568 /* Copy the image chunk content. */
4569 memcpy(dest_image_ptr, data_ptr, chunk_size);
4570
4571 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004572 chunk_size, offset,
4573 LANCER_FW_DOWNLOAD_LOCATION,
4574 &data_written, &change_status,
4575 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004576 if (status)
4577 break;
4578
4579 offset += data_written;
4580 data_ptr += data_written;
4581 image_size -= data_written;
4582 }
4583
4584 if (!status) {
4585 /* Commit the FW written */
4586 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004587 0, offset,
4588 LANCER_FW_DOWNLOAD_LOCATION,
4589 &data_written, &change_status,
4590 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004591 }
4592
Kalesh APbb864e02014-09-02 09:56:51 +05304593 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004594 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304595 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304596 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004597 }
4598
Kalesh APbb864e02014-09-02 09:56:51 +05304599 dev_info(dev, "Firmware flashed successfully\n");
4600
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004601 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304602 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004603 status = lancer_physdev_ctrl(adapter,
4604 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004605 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304606 dev_err(dev, "Adapter busy, could not reset FW\n");
4607 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004608 }
4609 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304610 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004611 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304612
4613 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004614}
4615
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004616#define BE2_UFI 2
4617#define BE3_UFI 3
4618#define BE3R_UFI 10
4619#define SH_UFI 4
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004620#define SH_P2_UFI 11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004621
Sathya Perlaca34fe32012-11-06 17:48:56 +00004622static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004623 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004624{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004625 if (!fhdr) {
4626 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4627 return -1;
4628 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004629
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004630 /* First letter of the build version is used to identify
4631 * which chip this image file is meant for.
4632 */
4633 switch (fhdr->build[0]) {
4634 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004635 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4636 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004637 case BLD_STR_UFI_TYPE_BE3:
4638 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4639 BE3_UFI;
4640 case BLD_STR_UFI_TYPE_BE2:
4641 return BE2_UFI;
4642 default:
4643 return -1;
4644 }
4645}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004646
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004647/* Check if the flash image file is compatible with the adapter that
4648 * is being flashed.
4649 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004650 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004651 */
4652static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4653 struct flash_file_hdr_g3 *fhdr)
4654{
4655 int ufi_type = be_get_ufi_type(adapter, fhdr);
4656
4657 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004658 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004659 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004660 case SH_UFI:
4661 return (skyhawk_chip(adapter) &&
4662 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004663 case BE3R_UFI:
4664 return BE3_chip(adapter);
4665 case BE3_UFI:
4666 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4667 case BE2_UFI:
4668 return BE2_chip(adapter);
4669 default:
4670 return false;
4671 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004672}
4673
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004674static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4675{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004676 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004677 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004678 struct image_hdr *img_hdr_ptr;
4679 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004680 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004681
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004682 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4683 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4684 dev_err(dev, "Flash image is not compatible with adapter\n");
4685 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004686 }
4687
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004688 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4689 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4690 GFP_KERNEL);
4691 if (!flash_cmd.va)
4692 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004693
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004694 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4695 for (i = 0; i < num_imgs; i++) {
4696 img_hdr_ptr = (struct image_hdr *)(fw->data +
4697 (sizeof(struct flash_file_hdr_g3) +
4698 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004699 if (!BE2_chip(adapter) &&
4700 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4701 continue;
4702
4703 if (skyhawk_chip(adapter))
4704 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4705 num_imgs);
4706 else
4707 status = be_flash_BEx(adapter, fw, &flash_cmd,
4708 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004709 }
4710
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004711 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4712 if (!status)
4713 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004714
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004715 return status;
4716}
4717
4718int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4719{
4720 const struct firmware *fw;
4721 int status;
4722
4723 if (!netif_running(adapter->netdev)) {
4724 dev_err(&adapter->pdev->dev,
4725 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304726 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004727 }
4728
4729 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4730 if (status)
4731 goto fw_exit;
4732
4733 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4734
4735 if (lancer_chip(adapter))
4736 status = lancer_fw_download(adapter, fw);
4737 else
4738 status = be_fw_download(adapter, fw);
4739
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004740 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304741 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004742
Ajit Khaparde84517482009-09-04 03:12:16 +00004743fw_exit:
4744 release_firmware(fw);
4745 return status;
4746}
4747
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004748static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4749 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004750{
4751 struct be_adapter *adapter = netdev_priv(dev);
4752 struct nlattr *attr, *br_spec;
4753 int rem;
4754 int status = 0;
4755 u16 mode = 0;
4756
4757 if (!sriov_enabled(adapter))
4758 return -EOPNOTSUPP;
4759
4760 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004761 if (!br_spec)
4762 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004763
4764 nla_for_each_nested(attr, br_spec, rem) {
4765 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4766 continue;
4767
Thomas Grafb7c1a312014-11-26 13:42:17 +01004768 if (nla_len(attr) < sizeof(mode))
4769 return -EINVAL;
4770
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004771 mode = nla_get_u16(attr);
4772 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4773 return -EINVAL;
4774
4775 status = be_cmd_set_hsw_config(adapter, 0, 0,
4776 adapter->if_handle,
4777 mode == BRIDGE_MODE_VEPA ?
4778 PORT_FWD_TYPE_VEPA :
4779 PORT_FWD_TYPE_VEB);
4780 if (status)
4781 goto err;
4782
4783 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4784 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4785
4786 return status;
4787 }
4788err:
4789 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4790 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4791
4792 return status;
4793}
4794
4795static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304796 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004797{
4798 struct be_adapter *adapter = netdev_priv(dev);
4799 int status = 0;
4800 u8 hsw_mode;
4801
4802 if (!sriov_enabled(adapter))
4803 return 0;
4804
4805 /* BE and Lancer chips support VEB mode only */
4806 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4807 hsw_mode = PORT_FWD_TYPE_VEB;
4808 } else {
4809 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4810 adapter->if_handle, &hsw_mode);
4811 if (status)
4812 return 0;
4813 }
4814
4815 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4816 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004817 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4818 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004819}
4820
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304821#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004822/* VxLAN offload Notes:
4823 *
4824 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4825 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4826 * is expected to work across all types of IP tunnels once exported. Skyhawk
4827 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304828 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4829 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4830 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004831 *
4832 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4833 * adds more than one port, disable offloads and don't re-enable them again
4834 * until after all the tunnels are removed.
4835 */
/* ndo_add_vxlan_port handler: enable VxLAN offloads for the first UDP
 * port the stack adds.  Skyhawk supports offloads for a single port
 * only; adding a second port disables offloads until all ports are
 * removed again (vxlan_port_count tracks this — see the block comment
 * above this ifdef section).
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* BE3 and Lancer have no VxLAN offload support */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		/* Still count the port so offloads stay off until every
		 * port has been deleted.
		 */
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already torn down above; just keep counting ports */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features only now that a port is set */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4884
4885static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4886 __be16 port)
4887{
4888 struct be_adapter *adapter = netdev_priv(netdev);
4889
4890 if (lancer_chip(adapter) || BEx_chip(adapter))
4891 return;
4892
4893 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004894 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304895
4896 be_disable_vxlan_offloads(adapter);
4897
4898 dev_info(&adapter->pdev->dev,
4899 "Disabled VxLAN offloads for UDP port %d\n",
4900 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004901done:
4902 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304903}
Joe Stringer725d5482014-11-13 16:38:13 -08004904
Jesse Gross5f352272014-12-23 22:37:26 -08004905static netdev_features_t be_features_check(struct sk_buff *skb,
4906 struct net_device *dev,
4907 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004908{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304909 struct be_adapter *adapter = netdev_priv(dev);
4910 u8 l4_hdr = 0;
4911
4912 /* The code below restricts offload features for some tunneled packets.
4913 * Offload features for normal (non tunnel) packets are unchanged.
4914 */
4915 if (!skb->encapsulation ||
4916 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4917 return features;
4918
4919 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4920 * should disable tunnel offload features if it's not a VxLAN packet,
4921 * as tunnel offloads have been enabled only for VxLAN. This is done to
4922 * allow other tunneled traffic like GRE work fine while VxLAN
4923 * offloads are configured in Skyhawk-R.
4924 */
4925 switch (vlan_get_protocol(skb)) {
4926 case htons(ETH_P_IP):
4927 l4_hdr = ip_hdr(skb)->protocol;
4928 break;
4929 case htons(ETH_P_IPV6):
4930 l4_hdr = ipv6_hdr(skb)->nexthdr;
4931 break;
4932 default:
4933 return features;
4934 }
4935
4936 if (l4_hdr != IPPROTO_UDP ||
4937 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4938 skb->inner_protocol != htons(ETH_P_TEB) ||
4939 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4940 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4941 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4942
4943 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08004944}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304945#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304946
/* Entry points through which the network stack drives a be2net device.
 * VF-management (ndo_set_vf_*) ops take effect only when SR-IOV is
 * enabled; VxLAN ops are compiled in only with CONFIG_BE2NET_VXLAN.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
4977
4978static void be_netdev_init(struct net_device *netdev)
4979{
4980 struct be_adapter *adapter = netdev_priv(netdev);
4981
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004982 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004983 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004984 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004985 if (be_multi_rxq(adapter))
4986 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004987
4988 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004989 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004990
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004991 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004992 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004993
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004994 netdev->priv_flags |= IFF_UNICAST_FLT;
4995
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004996 netdev->flags |= IFF_MULTICAST;
4997
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004998 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004999
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005000 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005001
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00005002 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005003}
5004
/* Quiesce the device ahead of recovery/suspend: detach it from the
 * stack, close it if it was running, and free HW resources.
 * Counterpart of be_resume().
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* rtnl_lock serializes against concurrent ndo_open/ndo_stop */
	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5017
/* Re-create HW resources and re-attach the netdev after be_cleanup().
 * Returns 0 on success or the errno from be_setup()/be_open().
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	/* Reopen queues only if the interface was up before cleanup */
	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}
5037
5038static int be_err_recover(struct be_adapter *adapter)
5039{
5040 struct device *dev = &adapter->pdev->dev;
5041 int status;
5042
5043 status = be_resume(adapter);
5044 if (status)
5045 goto err;
5046
Sathya Perla9fa465c2015-02-23 04:20:13 -05005047 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005048 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005049err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005050 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305051 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005052 else
5053 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005054
5055 return status;
5056}
5057
/* Delayed-work handler that polls for adapter HW errors and, where
 * supported, tears down and recovers the device.  Reschedules itself
 * via be_schedule_err_detection() unless a PF recovery attempt failed.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
				container_of(work, struct be_adapter,
					     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5079
Vasundhara Volam21252372015-02-06 08:18:42 -05005080static void be_log_sfp_info(struct be_adapter *adapter)
5081{
5082 int status;
5083
5084 status = be_cmd_query_sfp_info(adapter);
5085 if (!status) {
5086 dev_err(&adapter->pdev->dev,
5087 "Unqualified SFP+ detected on %c from %s part no: %s",
5088 adapter->port_name, adapter->phy.vendor_name,
5089 adapter->phy.vendor_pn);
5090 }
5091 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5092}
5093
/* Periodic (1 s) housekeeping work: reap MCC completions, refresh HW
 * statistics and die temperature, replenish starved RX queues, adapt
 * EQ delays and log unqualified-SFP events.  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF polls die temperature every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5140
Sathya Perla78fad34e2015-02-23 04:20:08 -05005141static void be_unmap_pci_bars(struct be_adapter *adapter)
5142{
5143 if (adapter->csr)
5144 pci_iounmap(adapter->pdev, adapter->csr);
5145 if (adapter->db)
5146 pci_iounmap(adapter->pdev, adapter->db);
5147}
5148
/* BAR number holding the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 on BE/Skyhawk PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
5156
5157static int be_roce_map_pci_bars(struct be_adapter *adapter)
5158{
5159 if (skyhawk_chip(adapter)) {
5160 adapter->roce_db.size = 4096;
5161 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5162 db_bar(adapter));
5163 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5164 db_bar(adapter));
5165 }
5166 return 0;
5167}
5168
5169static int be_map_pci_bars(struct be_adapter *adapter)
5170{
5171 u8 __iomem *addr;
5172 u32 sli_intf;
5173
5174 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5175 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5176 SLI_INTF_FAMILY_SHIFT;
5177 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5178
5179 if (BEx_chip(adapter) && be_physfn(adapter)) {
5180 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
5181 if (!adapter->csr)
5182 return -ENOMEM;
5183 }
5184
5185 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
5186 if (!addr)
5187 goto pci_map_err;
5188 adapter->db = addr;
5189
5190 be_roce_map_pci_bars(adapter);
5191 return 0;
5192
5193pci_map_err:
5194 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
5195 be_unmap_pci_bars(adapter);
5196 return -ENOMEM;
5197}
5198
5199static void be_drv_cleanup(struct be_adapter *adapter)
5200{
5201 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5202 struct device *dev = &adapter->pdev->dev;
5203
5204 if (mem->va)
5205 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5206
5207 mem = &adapter->rx_filter;
5208 if (mem->va)
5209 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5210
5211 mem = &adapter->stats_cmd;
5212 if (mem->va)
5213 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5214}
5215
/* Allocate and initialize various fields in be_adapter struct:
 * DMA buffers for the mailbox, RX-filter and stats FW commands, the
 * command locks and the periodic/error-detection work items.
 * Returns 0 or -ENOMEM; partially-allocated buffers are freed on error.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* The mailbox must be 16-byte aligned: over-allocate by 16 and
	 * align the working pointers below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* The stats request layout (and size) depends on the chip */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;
	adapter->cfg_num_qs = netif_get_num_default_rss_queues();

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5288
/* PCI remove callback: undo everything be_probe() set up, in reverse
 * order.  Also runs on driver unload.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before tearing down what it uses */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5318
Sathya Perlad3791422012-09-28 04:39:44 +00005319static char *mc_name(struct be_adapter *adapter)
5320{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305321 char *str = ""; /* default */
5322
5323 switch (adapter->mc_type) {
5324 case UMC:
5325 str = "UMC";
5326 break;
5327 case FLEX10:
5328 str = "FLEX10";
5329 break;
5330 case vNIC1:
5331 str = "vNIC-1";
5332 break;
5333 case nPAR:
5334 str = "nPAR";
5335 break;
5336 case UFP:
5337 str = "UFP";
5338 break;
5339 case vNIC2:
5340 str = "vNIC-2";
5341 break;
5342 default:
5343 str = "";
5344 }
5345
5346 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005347}
5348
/* Identify whether this PCI function is the PF or a VF, for logging */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5353
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005354static inline char *nic_name(struct pci_dev *pdev)
5355{
5356 switch (pdev->device) {
5357 case OC_DEVICE_ID1:
5358 return OC_NAME;
5359 case OC_DEVICE_ID2:
5360 return OC_NAME_BE;
5361 case OC_DEVICE_ID3:
5362 case OC_DEVICE_ID4:
5363 return OC_NAME_LANCER;
5364 case BE_DEVICE_ID2:
5365 return BE3_NAME;
5366 case OC_DEVICE_ID5:
5367 case OC_DEVICE_ID6:
5368 return OC_NAME_SH;
5369 default:
5370 return BE_NAME;
5371 }
5372}
5373
/* PCI probe callback: enable the device, set up DMA masks, map BARs,
 * allocate driver state, configure HW resources and register the
 * netdev.  Errors unwind through the goto chain in reverse order of
 * acquisition.  Returns 0 or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5459
/* Legacy PM suspend hook: arm wake-on-lan if enabled, stop interrupts
 * and the error-detection worker, quiesce the device and put the PCI
 * function into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5477
/* Legacy PCI PM resume callback.
 * Re-enables the PCI function, restores config space, rebuilds the
 * adapter state via be_resume(), restarts error detection, and disarms
 * wake-on-LAN if it was armed in be_suspend().
 *
 * Return: 0 on success or a negative errno from device/driver bring-up.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	/* Bring the function back to full power before touching it */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Re-create queues/IRQs and reopen the netdev if it was running */
	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	/* Disarm WoL now that the device is fully operational again */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5501
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback (reboot/poweroff path).
 * Stops RoCE and all deferred work, detaches the netdev, and issues a
 * function-level reset so the hardware stops DMA before the kernel
 * hands off control.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* FLR: quiesce the function so no DMA occurs after shutdown */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5522
/* EEH (PCI error recovery) "error detected" callback.
 * Records the error state, tears down the data path once (guarded by
 * adapter->eeh_error so repeated notifications are idempotent), and
 * tells the EEH core whether a slot reset should be attempted.
 *
 * Return: PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 *         PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Clean up only on the first notification for this error */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5554
/* EEH slot-reset callback.
 * Re-enables the freshly reset PCI function, restores config space,
 * waits for firmware readiness, then clears AER and driver error state.
 *
 * Return: PCI_ERS_RESULT_RECOVERED on success, otherwise
 *         PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear platform AER status, then the driver's own error flags */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5581
/* EEH resume callback — final stage of PCI error recovery.
 * Saves the restored config space, rebuilds the adapter via be_resume()
 * and restarts the error-detection worker. On failure only a message is
 * logged; the EEH resume hook has no way to report an error upward.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* Re-create queues/IRQs and reopen the netdev if it was running */
	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5600
/* PCI error-recovery (EEH/AER) handler table registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5606
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * EEH error handlers for all supported BE2/BE3/Lancer/Skyhawk devices
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5617
5618static int __init be_init_module(void)
5619{
Joe Perches8e95a202009-12-03 07:58:21 +00005620 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5621 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005622 printk(KERN_WARNING DRV_NAME
5623 " : Module param rx_frag_size must be 2048/4096/8192."
5624 " Using 2048\n");
5625 rx_frag_size = 2048;
5626 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005627
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005628 return pci_register_driver(&be_driver);
5629}
5630module_init(be_init_module);
5631
/* Module exit point: unregisters the PCI driver, which triggers
 * be_remove() for every bound device
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);