blob: 932b93a1496592ef6d38820997c74d4e36ddf526 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000054/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070055static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000056 "CEV",
57 "CTX",
58 "DBUF",
59 "ERX",
60 "Host",
61 "MPU",
62 "NDMA",
63 "PTC ",
64 "RDMA ",
65 "RXF ",
66 "RXIPS ",
67 "RXULP0 ",
68 "RXULP1 ",
69 "RXULP2 ",
70 "TIM ",
71 "TPOST ",
72 "TPRE ",
73 "TXIPS ",
74 "TXULP0 ",
75 "TXULP1 ",
76 "UC ",
77 "WDMA ",
78 "TXULP2 ",
79 "HOST1 ",
80 "P0_OB_LINK ",
81 "P1_OB_LINK ",
82 "HOST_GPIO ",
83 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053084 "ERX2 ",
85 "SPARE ",
86 "JTAG ",
87 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000088};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
Ajit Khaparde7c185272010-07-29 06:16:33 +000090/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070091static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000092 "LPCMEMHOST",
93 "MGMT_MAC",
94 "PCS0ONLINE",
95 "MPU_IRAM",
96 "PCS1ONLINE",
97 "PCTL0",
98 "PCTL1",
99 "PMEM",
100 "RR",
101 "TXPB",
102 "RXPP",
103 "XAUI",
104 "TXP",
105 "ARM",
106 "IPC",
107 "HOST2",
108 "HOST3",
109 "HOST4",
110 "HOST5",
111 "HOST6",
112 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530113 "ECRC",
114 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700115 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530116 "PERIPH",
117 "LLTXULP",
118 "D2P",
119 "RCON",
120 "LDMA",
121 "LLTXP",
122 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000123 "Unknown"
124};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000271 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000283 }
284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000290 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000291 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700292
Sathya Perla5a712c12013-07-23 15:24:59 +0530293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
dingtianhong61d23e92013-12-30 15:40:43 +0800296 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 status = -EPERM;
298 goto err;
299 }
300
Somnath Koture3a7ae22011-10-27 07:14:05 +0000301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530302 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 return 0;
304err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306 return status;
307}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
Ajit Khaparde61000862013-10-03 16:16:33 -0500440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530484 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500492}
493
Selvin Xavier005d5692011-05-16 07:36:35 +0000494static void populate_lancer_stats(struct be_adapter *adapter)
495{
Selvin Xavier005d5692011-05-16 07:36:35 +0000496 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000527 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000528 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000556void be_parse_stats(struct be_adapter *adapter)
557{
Ajit Khaparde61000862013-10-03 16:16:33 -0500558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559 struct be_rx_obj *rxo;
560 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000561 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000562
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000565 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else if (BE3_chip(adapter))
569 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 else
572 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573
Ajit Khaparde61000862013-10-03 16:16:33 -0500574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000578 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000579 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000580}
581
/* ndo_get_stats64() handler.
 * Sums the per-queue RX/TX software counters into @stats and folds in the
 * HW error counters cached in adapter->drv_stats. The per-queue 64-bit
 * counters are read under u64_stats begin/retry loops so the values are
 * consistent on 32-bit hosts. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	/* Accumulate SW counters from every RX queue */
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	/* Accumulate SW counters from every TX queue */
	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
/* Accounts one TX request in the per-queue SW stats.
 * tx_pkts is bumped by the GSO segment count (or 1 for non-GSO skbs);
 * the update is bracketed by u64_stats begin/end for 32-bit readers.
 */
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}
675
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500676/* Returns number of WRBs needed for the skb */
677static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500679 /* +1 for the header wrb */
680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681}
682
683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
684{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500685 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
686 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
687 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
688 wrb->rsvd0 = 0;
689}
690
691/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
692 * to avoid the swap and shift/mask operations in wrb_fill().
693 */
694static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
695{
696 wrb->frag_pa_hi = 0;
697 wrb->frag_pa_lo = 0;
698 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000699 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700700}
701
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000702static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530703 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704{
705 u8 vlan_prio;
706 u16 vlan_tag;
707
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100708 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000709 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
710 /* If vlan priority provided by OS is NOT in available bmap */
711 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
712 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
713 adapter->recommended_prio;
714
715 return vlan_tag;
716}
717
Sathya Perlac9c47142014-03-27 10:46:19 +0530718/* Used only for IP tunnel packets */
719static u16 skb_inner_ip_proto(struct sk_buff *skb)
720{
721 return (inner_ip_hdr(skb)->version == 4) ?
722 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
723}
724
725static u16 skb_ip_proto(struct sk_buff *skb)
726{
727 return (ip_hdr(skb)->version == 4) ?
728 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
729}
730
/* Builds the TX header WRB for an skb: programs the LSO, checksum-offload
 * and VLAN bits plus the total WRB count and frame length that the HW
 * reads from the first WRB of each request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not supported/needed on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* For tunneled pkts the inner headers decide the L4 csum bit */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500779 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000780
Sathya Perla7101e112010-03-22 20:41:12 +0000781
Sathya Perlaf986afc2015-02-06 08:18:43 -0500782 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
783 (u64)le32_to_cpu(wrb->frag_pa_lo);
784 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000785 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500786 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500788 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* Maps the skb for DMA and posts one header WRB plus one fragment WRB per
 * linear/paged piece onto the TX queue. On any mapping failure the queue
 * head is rolled back and every mapping done so far is undone.
 * Returns the number of WRBs used up by the skb, or 0 on failure.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;	/* saved for rollback and bookkeeping */

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	/* Map the linear portion, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* The skb is remembered at the slot of its hdr WRB so that the
	 * completion path can free it.
	 */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first mapped WRB was dma_map_single()'d */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}
865
/* Returns non-zero once the QnQ async event has been received from f/w;
 * the raw flag bit is returned and used only as a boolean by callers.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
870
/* Inserts the VLAN tag(s) into the packet data in software instead of
 * letting the HW tag it (workaround for ASIC errata). Inserts the inner
 * tag (from the skb or the pvid) and, in QnQ mode, the outer qnq_vid tag.
 * May set *skip_hw_vlan to tell the F/W to skip its own VLAN insertion.
 * Returns the (possibly reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* the skb data is about to be modified; get a private copy */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the payload; clear the offload tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
915
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000916static bool be_ipv6_exthdr_check(struct sk_buff *skb)
917{
918 struct ethhdr *eh = (struct ethhdr *)skb->data;
919 u16 offset = ETH_HLEN;
920
921 if (eh->h_proto == htons(ETH_P_IPV6)) {
922 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
923
924 offset += sizeof(struct ipv6hdr);
925 if (ip6h->nexthdr != NEXTHDR_TCP &&
926 ip6h->nexthdr != NEXTHDR_UDP) {
927 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530928 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929
930 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
931 if (ehdr->hdrlen == 0xff)
932 return true;
933 }
934 }
935 return false;
936}
937
938static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
939{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100940 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000941}
942
Sathya Perla748b5392014-05-09 13:29:13 +0530943static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000944{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000945 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000946}
947
/* Applies the BEx/Lancer TX errata workarounds to an skb: trims HW-padded
 * short IPv4 frames, inserts VLAN tags in SW where HW tagging would
 * corrupt the pkt or lock up the ASIC, and drops pkts that cannot be
 * made safe. Returns the (possibly reallocated) skb, or NULL if the pkt
 * was dropped/freed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the padding so only tot_len bytes follow the hdr */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1015
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301016static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1017 struct sk_buff *skb,
1018 bool *skip_hw_vlan)
1019{
1020 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1021 * less may cause a transmit stall on that port. So the work-around is
1022 * to pad short packets (<= 32 bytes) to a 36-byte length.
1023 */
1024 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001025 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301026 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301027 }
1028
1029 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1030 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1031 if (!skb)
1032 return NULL;
1033 }
1034
1035 return skb;
1036}
1037
/* Notifies the HW of all WRBs queued since the last flush. Ensures the
 * last request raises an event/completion, and on BE chips pads an odd
 * WRB count with a dummy WRB (the doorbell only accepts even counts),
 * patching the request's num_wrb field in its already-LE hdr accordingly.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* rewrite num_wrb in the hdr to include the dummy wrb */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1061
/* ndo_start_xmit() handler: applies the TX workarounds, enqueues the skb
 * onto the queue picked by the stack, stops the subqueue when nearly
 * full, and rings the doorbell unless xmit_more says more skbs follow.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* Stop the queue if another max-fragment request can't fit */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1098
1099static int be_change_mtu(struct net_device *netdev, int new_mtu)
1100{
1101 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301102 struct device *dev = &adapter->pdev->dev;
1103
1104 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1105 dev_info(dev, "MTU must be between %d and %d bytes\n",
1106 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001107 return -EINVAL;
1108 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301109
1110 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301111 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001112 netdev->mtu = new_mtu;
1113 return 0;
1114}
1115
/* True only when every promiscuous bit in BE_IF_FLAGS_ALL_PROMISCUOUS
 * is currently set on the interface.
 */
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1121
1122static int be_set_vlan_promisc(struct be_adapter *adapter)
1123{
1124 struct device *dev = &adapter->pdev->dev;
1125 int status;
1126
1127 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1128 return 0;
1129
1130 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1131 if (!status) {
1132 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1133 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1134 } else {
1135 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1136 }
1137 return status;
1138}
1139
1140static int be_clear_vlan_promisc(struct be_adapter *adapter)
1141{
1142 struct device *dev = &adapter->pdev->dev;
1143 int status;
1144
1145 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1146 if (!status) {
1147 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1148 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1149 }
1150 return status;
1151}
1152
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Re-programs the HW VLAN filter table from adapter->vids, falling back
 * to VLAN promiscuous mode if the table overflows or the command fails
 * for lack of resources, and leaving promiscuous mode when a filter
 * table is successfully programmed again.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1187
/* ndo_vlan_rx_add_vid() handler: records the vid in adapter->vids and
 * re-programs the HW filter; on failure the local bookkeeping is rolled
 * back so state stays consistent with the HW.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* already configured; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* undo the local state on HW programming failure */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1211
Patrick McHardy80d5c362013-04-19 02:04:28 +00001212static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001213{
1214 struct be_adapter *adapter = netdev_priv(netdev);
1215
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001216 /* Packets with VID 0 are always received by Lancer by default */
1217 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301218 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001219
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301220 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301221 adapter->vlans_added--;
1222
1223 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001224}
1225
/* Turns off full promiscuous mode in HW and clears the cached flags. */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1231
/* Turns on full promiscuous mode in HW and records it in if_flags.
 * NOTE(review): unlike be_set_mc_promisc(), the flag is set even if the
 * f/w command fails — presumably intentional best-effort; confirm.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1237
1238static void be_set_mc_promisc(struct be_adapter *adapter)
1239{
1240 int status;
1241
1242 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1243 return;
1244
1245 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1246 if (!status)
1247 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1248}
1249
1250static void be_set_mc_list(struct be_adapter *adapter)
1251{
1252 int status;
1253
1254 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1255 if (!status)
1256 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1257 else
1258 be_set_mc_promisc(adapter);
1259}
1260
1261static void be_set_uc_list(struct be_adapter *adapter)
1262{
1263 struct netdev_hw_addr *ha;
1264 int i = 1; /* First slot is claimed by the Primary MAC */
1265
1266 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1267 be_cmd_pmac_del(adapter, adapter->if_handle,
1268 adapter->pmac_id[i], 0);
1269
1270 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1271 be_set_all_promisc(adapter);
1272 return;
1273 }
1274
1275 netdev_for_each_uc_addr(ha, adapter->netdev) {
1276 adapter->uc_macs++; /* First slot is for Primary MAC */
1277 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1278 &adapter->pmac_id[adapter->uc_macs], 0);
1279 }
1280}
1281
1282static void be_clear_uc_list(struct be_adapter *adapter)
1283{
1284 int i;
1285
1286 for (i = 1; i < (adapter->uc_macs + 1); i++)
1287 be_cmd_pmac_del(adapter, adapter->if_handle,
1288 adapter->pmac_id[i], 0);
1289 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301290}
1291
Sathya Perlaa54769f2011-10-24 02:45:00 +00001292static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001293{
1294 struct be_adapter *adapter = netdev_priv(netdev);
1295
1296 if (netdev->flags & IFF_PROMISC) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001297 be_set_all_promisc(adapter);
1298 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001300
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001301 /* Interface was previously in promiscuous mode; disable it */
1302 if (be_in_all_promisc(adapter)) {
1303 be_clear_all_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001304 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001305 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001306 }
1307
Sathya Perlae7b909a2009-11-22 22:01:10 +00001308 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001309 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001310 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1311 be_set_mc_promisc(adapter);
Kalesh APa0794882014-05-30 19:06:23 +05301312 return;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001313 }
Kalesh APa0794882014-05-30 19:06:23 +05301314
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001315 if (netdev_uc_count(netdev) != adapter->uc_macs)
1316 be_set_uc_list(adapter);
1317
1318 be_set_mc_list(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001319}
1320
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001321static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1322{
1323 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001324 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001325 int status;
1326
Sathya Perla11ac75e2011-12-13 00:58:50 +00001327 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001328 return -EPERM;
1329
Sathya Perla11ac75e2011-12-13 00:58:50 +00001330 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001331 return -EINVAL;
1332
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301333 /* Proceed further only if user provided MAC is different
1334 * from active MAC
1335 */
1336 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1337 return 0;
1338
Sathya Perla3175d8c2013-07-23 15:25:03 +05301339 if (BEx_chip(adapter)) {
1340 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1341 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001342
Sathya Perla11ac75e2011-12-13 00:58:50 +00001343 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1344 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301345 } else {
1346 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1347 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001348 }
1349
Kalesh APabccf232014-07-17 16:20:24 +05301350 if (status) {
1351 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1352 mac, vf, status);
1353 return be_cmd_status(status);
1354 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001355
Kalesh APabccf232014-07-17 16:20:24 +05301356 ether_addr_copy(vf_cfg->mac_addr, mac);
1357
1358 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001359}
1360
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001361static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301362 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001363{
1364 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001365 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001366
Sathya Perla11ac75e2011-12-13 00:58:50 +00001367 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001368 return -EPERM;
1369
Sathya Perla11ac75e2011-12-13 00:58:50 +00001370 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001371 return -EINVAL;
1372
1373 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001374 vi->max_tx_rate = vf_cfg->tx_rate;
1375 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001376 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1377 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001378 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301379 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001380
1381 return 0;
1382}
1383
Sathya Perla748b5392014-05-09 13:29:13 +05301384static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001385{
1386 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001387 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001388 int status = 0;
1389
Sathya Perla11ac75e2011-12-13 00:58:50 +00001390 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001391 return -EPERM;
1392
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001393 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001394 return -EINVAL;
1395
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001396 if (vlan || qos) {
1397 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301398 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001399 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1400 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001401 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001402 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301403 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1404 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001405 }
1406
Kalesh APabccf232014-07-17 16:20:24 +05301407 if (status) {
1408 dev_err(&adapter->pdev->dev,
1409 "VLAN %d config on VF %d failed : %#x\n", vlan,
1410 vf, status);
1411 return be_cmd_status(status);
1412 }
1413
1414 vf_cfg->vlan_tag = vlan;
1415
1416 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001417}
1418
/* ndo_set_vf_rate handler: program a max TX rate (in Mbps) for VF @vf.
 * min_tx_rate is not supported and must be 0. max_tx_rate == 0 clears
 * the limit. Returns 0 or a -ve errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* A minimum rate guarantee is not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 means "no limit": skip validation (link_speed stays 0)
	 * and program the FW directly.
	 */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	/* The requested rate must lie within [100, current link speed] */
	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the rate only after the FW accepted it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301480
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301481static int be_set_vf_link_state(struct net_device *netdev, int vf,
1482 int link_state)
1483{
1484 struct be_adapter *adapter = netdev_priv(netdev);
1485 int status;
1486
1487 if (!sriov_enabled(adapter))
1488 return -EPERM;
1489
1490 if (vf >= adapter->num_vfs)
1491 return -EINVAL;
1492
1493 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301494 if (status) {
1495 dev_err(&adapter->pdev->dev,
1496 "Link state change on VF %d failed: %#x\n", vf, status);
1497 return be_cmd_status(status);
1498 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301499
Kalesh APabccf232014-07-17 16:20:24 +05301500 adapter->vf_cfg[vf].plink_tracking = link_state;
1501
1502 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301503}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001504
Sathya Perla2632baf2013-10-01 16:00:00 +05301505static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1506 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507{
Sathya Perla2632baf2013-10-01 16:00:00 +05301508 aic->rx_pkts_prev = rx_pkts;
1509 aic->tx_reqs_prev = tx_pkts;
1510 aic->jiffies = now;
1511}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001512
/* Adaptive interrupt coalescing: recompute the event-queue delay for
 * every EQ from the RX+TX packet rate seen since the last run, and push
 * all changed delays to the FW in a single batched command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		/* AIC disabled for this EQ: fall back to the statically
		 * configured delay (et_eqd) and reset the AIC timestamp.
		 */
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the per-queue packet counters under their
		 * u64-stats sequence counters (retry on writer overlap).
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined RX+TX packets/sec over the elapsed interval,
		 * mapped to a candidate delay value.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Very low rates get no delay; otherwise clamp to the
		 * per-EQ AIC min/max bounds.
		 */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1578
Sathya Perla3abcded2010-10-03 22:12:27 -07001579static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301580 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001581{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001582 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001583
Sathya Perlaab1594e2011-07-25 19:10:15 +00001584 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001585 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001586 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001587 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001588 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001589 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001590 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001591 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001592 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001593}
1594
Sathya Perla2e588f82011-03-11 02:49:26 +00001595static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001596{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001597 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301598 * Also ignore ipcksm for ipv6 pkts
1599 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001600 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301601 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001602}
1603
/* Pop the page_info entry for the frag at the RX queue tail and finish
 * its DMA handling: a full unmap when this is the page's last frag,
 * otherwise just a CPU sync of the single frag. Advances the queue tail
 * and decrements the used count. Returns the popped page_info.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of this (big) page: unmap the whole mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* More frags share this page: only sync this frag's bytes */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1629
1630/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001631static void be_rx_compl_discard(struct be_rx_obj *rxo,
1632 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001633{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001634 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001635 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001636
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001637 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301638 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001639 put_page(page_info->page);
1640 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001641 }
1642}
1643
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first frag is (partially) copied into the skb
 * linear area, remaining frags are attached as page frags, coalescing
 * consecutive frags that live in the same physical page into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header inline; the rest of the
		 * first frag stays in the page and becomes frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1718
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the completion's frags, set checksum /
 * hash / VLAN metadata and hand it to the network stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and recycle the pages */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the device offload is enabled
	 * and the completion flags say it is reliable (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1754
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the completion's page frags to a NAPI-provided skb (coalescing
 * frags from the same physical page), set metadata, and pass it to GRO.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: recycle the completion's pages */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j tracks the skb frag slot; -1 so the first frag bumps it to 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when the HW validated the checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1812
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001813static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1814 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001815{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301816 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1817 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1818 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1819 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1820 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1821 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1822 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1823 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1824 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1825 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1826 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001827 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301828 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1829 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001830 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301831 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301832 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301833 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001834}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001835
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001836static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1837 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001838{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301839 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1840 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1841 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1842 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1843 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1844 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1845 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1846 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1847 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1848 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1849 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001850 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301851 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1852 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001853 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301854 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1855 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001856}
1857
/* Fetch the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp and advance the CQ tail. Returns NULL when no valid entry
 * is pending. The returned pointer is to per-rxo storage and is
 * overwritten by the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry until the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* Don't report an L4 checksum for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* non-Lancer chips deliver the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the vid was
		 * explicitly configured by the host
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1902
Eric Dumazet1829b082011-03-01 05:48:12 +00001903static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001906
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001908 gfp |= __GFP_COMP;
1909 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001910}
1911
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Stop early if the next ring slot still holds an unconsumed page
	 * (i.e. the ring is full).
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page"; it is carved into
			 * rx_frag_size chunks below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current page: take an extra
			 * reference so each frag owns one.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Program the 64-bit frag DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Last frag of this page: remember the page-level DMA
			 * address so the whole mapping can be torn down later.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 frags per doorbell */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1994
Sathya Perla5fb379e2009-06-18 00:02:59 +00001995static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001997 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1998
1999 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2000 return NULL;
2001
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002002 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003 be_dws_le_to_cpu(txcp, sizeof(*txcp));
2004
2005 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2006
2007 queue_tail_inc(tx_cq);
2008 return txcp;
2009}
2010
/* Walk the TX ring from its tail up to and including @last_index,
 * unmapping each wrb's DMA buffer and freeing the completed skbs.
 * A request occupies one hdr wrb (marked by a non-NULL sent_skbs slot)
 * followed by its frag wrbs. Returns the number of wrbs consumed so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Unmap the header only for the first frag wrb after the
		 * hdr wrb, and only if the skb has linear header data.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2044
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002045/* Return the number of events in the event queue */
2046static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002047{
2048 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002049 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002050
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002051 do {
2052 eqe = queue_tail_node(&eqo->q);
2053 if (eqe->evt == 0)
2054 break;
2055
2056 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002057 eqe->evt = 0;
2058 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002059 queue_tail_inc(&eqo->q);
2060 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002061
2062 return num;
2063}
2064
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002065/* Leaves the EQ is disarmed state */
2066static void be_eq_clean(struct be_eq_obj *eqo)
2067{
2068 int num = events_get(eqo);
2069
2070 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2071}
2072
/* Drain and discard all pending RX completions for @rxo, then free every
 * RX buffer still posted to the ring and reset the ring indices.
 * Called during queue teardown; leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2122
/* Reap all outstanding TX completions across every TX queue, then clean
 * up any wrbs that were enqueued but never notified to HW, resetting the
 * TXQ indices for those. Called during queue teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW made progress; restart the silence timer */
				timeo = 0;
			}
			/* Only un-notified wrbs (pend_wrb_cnt) may remain */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2187
/* Destroy every event queue: drain pending events, tell FW to destroy
 * the EQ, unregister its NAPI context, and free its ring memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Queue memory is freed even if the EQ was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2203
/* Create the event queues (bounded by available IRQs and the configured
 * queue count), registering a NAPI context and default adaptive-interrupt
 * settings for each. Returns 0 on success or a negative error code;
 * on failure, partially created queues are left for the caller/teardown
 * path to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2236
Sathya Perla5fb379e2009-06-18 00:02:59 +00002237static void be_mcc_queues_destroy(struct be_adapter *adapter)
2238{
2239 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002240
Sathya Perla8788fdc2009-07-27 22:52:03 +00002241 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002242 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002243 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002244 be_queue_free(adapter, q);
2245
Sathya Perla8788fdc2009-07-27 22:52:03 +00002246 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002247 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002248 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002249 be_queue_free(adapter, q);
2250}
2251
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate + create the MCC completion queue first, then the MCC
	 * queue itself; unwind in reverse order via the goto chain on error.
	 */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2284
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285static void be_tx_queues_destroy(struct be_adapter *adapter)
2286{
2287 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002288 struct be_tx_obj *txo;
2289 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002290
Sathya Perla3c8def92011-06-12 20:01:58 +00002291 for_all_tx_queues(adapter, txo, i) {
2292 q = &txo->q;
2293 if (q->created)
2294 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2295 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002296
Sathya Perla3c8def92011-06-12 20:01:58 +00002297 q = &txo->cq;
2298 if (q->created)
2299 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2300 be_queue_free(adapter, q);
2301 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302}
2303
/* Create the TX queues and their completion queues; the TX queue count
 * is bounded by the number of EQs and the HW maximum. Returns 0 on
 * success or a negative error code; partially created queues are left
 * for the teardown path to clean up.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2344
2345static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002346{
2347 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002348 struct be_rx_obj *rxo;
2349 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002350
Sathya Perla3abcded2010-10-03 22:12:27 -07002351 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002352 q = &rxo->cq;
2353 if (q->created)
2354 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2355 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002356 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002357}
2358
/* Decide the RX queue count and create a completion queue per RX ring.
 * Returns 0 on success or a negative error code; partially created
 * queues are left for the teardown path to clean up.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs may share EQs when there are more RXQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2395
/* INTx interrupt handler: counts pending events, schedules NAPI if not
 * already scheduled, and acks the events to HW without re-arming the EQ.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2427
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002428static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002429{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002430 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002431
Sathya Perla0b545a62012-11-23 00:27:18 +00002432 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2433 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002434 return IRQ_HANDLED;
2435}
2436
Sathya Perla2e588f82011-03-11 02:49:26 +00002437static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002438{
Somnath Koture38b1702013-05-29 22:55:56 +00002439 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002440}
2441
/* NAPI RX poll loop: process up to @budget completions from rxo's CQ,
 * discarding flush/partial/mis-filtered completions, then ack the CQ and
 * replenish the RX ring when it falls below the refill watermark.
 * @polling distinguishes busy-poll from regular NAPI polling.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2501
Kalesh AP512bb8a2014-09-02 09:56:49 +05302502static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2503{
2504 switch (status) {
2505 case BE_TX_COMP_HDR_PARSE_ERR:
2506 tx_stats(txo)->tx_hdr_parse_err++;
2507 break;
2508 case BE_TX_COMP_NDMA_ERR:
2509 tx_stats(txo)->tx_dma_err++;
2510 break;
2511 case BE_TX_COMP_ACL_ERR:
2512 tx_stats(txo)->tx_spoof_check_err++;
2513 break;
2514 }
2515}
2516
2517static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2518{
2519 switch (status) {
2520 case LANCER_TX_COMP_LSO_ERR:
2521 tx_stats(txo)->tx_tso_err++;
2522 break;
2523 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2524 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2525 tx_stats(txo)->tx_spoof_check_err++;
2526 break;
2527 case LANCER_TX_COMP_QINQ_ERR:
2528 tx_stats(txo)->tx_qinq_err++;
2529 break;
2530 case LANCER_TX_COMP_PARITY_ERR:
2531 tx_stats(txo)->tx_internal_parity_err++;
2532 break;
2533 case LANCER_TX_COMP_DMA_ERR:
2534 tx_stats(txo)->tx_dma_err++;
2535 break;
2536 }
2537}
2538
/* Drain the TX completion queue of @txo (the TX queue with index @idx),
 * free the consumed wrbs/skbs, account per-queue stats/errors, and wake
 * the corresponding netdev subqueue if it was flow-controlled.
 * Called from be_poll() in NAPI context.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* A non-zero completion status is a TX error; dispatch to
		 * the chip-family specific error accounting.
		 */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		/* Re-arm the CQ and release the wrb slots we consumed */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002577
Sathya Perlaf7062ee2015-02-06 08:18:35 -05002578#ifdef CONFIG_NET_RX_BUSY_POLL
/* Try to claim the EQ for NAPI processing.
 * Returns true if claimed; returns false when busy_poll currently owns
 * the EQ, in which case a NAPI-yield is recorded in eqo->state.
 * Caller runs in softirq context (BH already disabled).
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		/* NAPI must not already own the EQ if someone else holds it */
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}
2594
/* Release NAPI's ownership of the EQ and return it to the idle state.
 * Caller runs in softirq context (BH already disabled).
 */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	/* While NAPI owned the EQ, no poller may have run or yielded */
	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}
2604
/* Try to claim the EQ for busy-poll processing.
 * Returns true if claimed; returns false when NAPI currently owns the
 * EQ, in which case a poll-yield is recorded in eqo->state.
 * May be called from process context, hence the _bh lock variant.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}
2619
/* Release busy-poll's ownership of the EQ and return it to idle.
 * May be called from process context, hence the _bh lock variant.
 */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	/* NAPI cannot have owned the EQ while busy-poll held it */
	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}
2629
/* Initialize the per-EQ napi/busy-poll arbitration lock and state
 * (done at be_open() time, before the EQ can be polled).
 */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}
2635
/* Fence out busy-poll before tearing an EQ down (called from be_close()).
 * Spins (with 1ms backoff) until the NAPI lock is acquired, which
 * guarantees no be_busy_poll() is still running on this EQ.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}
2648
2649#else /* CONFIG_NET_RX_BUSY_POLL */
2650
/* Stubs used when CONFIG_NET_RX_BUSY_POLL is not set: NAPI always gets
 * the EQ (lock_napi returns true), busy-poll never does (lock_busy_poll
 * returns false), and the enable/disable/unlock hooks are no-ops.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
2676#endif /* CONFIG_NET_RX_BUSY_POLL */
2677
/* NAPI poll handler for one event queue: drains TX completions for all
 * TX queues on this EQ, processes RX (unless busy-poll owns the EQ),
 * services the MCC queue when this is the MCC EQ, and re-arms the EQ
 * only when the budget was not exhausted.
 * Returns the amount of RX work done (<= @budget).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* Count (but don't yet clear) events so they can be acked below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ: claim the whole budget so NAPI
		 * stays scheduled and retries
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ and ack the processed events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2717
Sathya Perla6384a4d2013-10-25 10:40:16 +05302718#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) receive handler for one event queue.
 * Returns LL_FLUSH_BUSY when NAPI owns the EQ; otherwise polls each RX
 * queue on this EQ with a small budget (4) and returns as soon as one
 * of them yields work.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
2738#endif
2739
/* Check the adapter for unrecoverable hardware/firmware errors.
 * Lancer chips report errors through the SLIPORT status registers;
 * other chips report Unrecoverable Errors (UE) through PCI config
 * space, masked by the corresponding UE mask registers.
 * On a detected error the carrier is turned off; adapter->hw_error is
 * set for Lancer (non-FW-reset) and Skyhawk UEs only.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Nothing more to do once an error has already been latched */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Only unmasked UE bits are considered real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log the description of every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2815
Sathya Perla8d56ff12009-11-22 22:02:26 +00002816static void be_msix_disable(struct be_adapter *adapter)
2817{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002818 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002819 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002820 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302821 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002822 }
2823}
2824
/* Enable MSI-x for the adapter, requesting between MIN_MSIX_VECTORS and
 * the computed maximum. When RoCE is supported, half of the granted
 * vectors are reserved for RoCE and the rest for the NIC.
 * Returns 0 on success; on failure returns 0 for PFs (INTx fallback is
 * possible) and the negative error for VFs (which have no INTx).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, but not fewer than
	 * MIN_MSIX_VECTORS; returns a negative errno on failure.
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2868
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002869static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302870 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002871{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302872 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002873}
2874
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees every IRQ registered so far (walking backwards),
 * disables MSI-x and returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs of all EQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2898
/* Register the adapter's interrupt handler(s): MSI-x when enabled, with
 * a fallback to shared INTx for PFs if MSI-x registration fails (VFs
 * have no INTx, so for them the MSI-x error is returned as-is).
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2926
2927static void be_irq_unregister(struct be_adapter *adapter)
2928{
2929 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002930 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002931 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002932
2933 if (!adapter->isr_registered)
2934 return;
2935
2936 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002937 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002938 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002939 goto done;
2940 }
2941
2942 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002943 for_all_evt_queues(adapter, eqo, i)
2944 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002945
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002946done:
2947 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002948}
2949
/* Destroy all RX queues: for rings that were actually created in FW,
 * issue the destroy cmd and drain their completion queues before the
 * queue memory is freed.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* Flush completions for the ring being destroyed */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2965
/* ndo_stop handler: tear down the data path in strict order — RoCE,
 * NAPI/busy-poll, async MCC, TX drain, RX queues, then per-EQ IRQ
 * synchronization and cleanup, and finally IRQ unregistration.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Wait for in-flight interrupt handlers before cleaning */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3011
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003012static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003013{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003014 struct rss_info *rss = &adapter->rss_info;
3015 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003016 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003017 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003018
3019 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003020 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3021 sizeof(struct be_eth_rx_d));
3022 if (rc)
3023 return rc;
3024 }
3025
3026 /* The FW would like the default RXQ to be created first */
3027 rxo = default_rxo(adapter);
3028 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3029 adapter->if_handle, false, &rxo->rss_id);
3030 if (rc)
3031 return rc;
3032
3033 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003034 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003035 rx_frag_size, adapter->if_handle,
3036 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003037 if (rc)
3038 return rc;
3039 }
3040
3041 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303042 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3043 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003044 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303045 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003046 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303047 rss->rsstable[j + i] = rxo->rss_id;
3048 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003049 }
3050 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303051 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3052 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003053
3054 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303055 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3056 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303057 } else {
3058 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303059 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303060 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003061
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003062 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05303063 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003064 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303065 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303066 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303067 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00003068 }
3069
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003070 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05303071
Sathya Perla482c9e72011-06-29 23:33:17 +00003072 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003073 for_all_rx_queues(adapter, rxo, i)
Ajit Khapardec30d7262014-09-12 17:39:16 +05303074 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Sathya Perla889cd4b2010-05-30 23:33:45 +00003075 return 0;
3076}
3077
/* ndo_open handler: bring the data path up — create RX queues, register
 * IRQs, arm RX/TX completion queues, enable async MCC, enable NAPI and
 * busy-poll and arm each EQ, report link state, start the TX queues and
 * open the RoCE side. On any failure, be_close() unwinds and -EIO is
 * returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VxLAN ports so offloads get set up */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3127
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003128static int be_setup_wol(struct be_adapter *adapter, bool enable)
3129{
3130 struct be_dma_mem cmd;
3131 int status = 0;
3132 u8 mac[ETH_ALEN];
3133
3134 memset(mac, 0, ETH_ALEN);
3135
3136 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07003137 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3138 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303139 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303140 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003141
3142 if (enable) {
3143 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303144 PCICFG_PM_CONTROL_OFFSET,
3145 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003146 if (status) {
3147 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003148 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003149 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3150 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003151 return status;
3152 }
3153 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303154 adapter->netdev->dev_addr,
3155 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003156 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3157 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3158 } else {
3159 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3160 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3161 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3162 }
3163
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003164 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003165 return status;
3166}
3167
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003168static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3169{
3170 u32 addr;
3171
3172 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3173
3174 mac[5] = (u8)(addr & 0xFF);
3175 mac[4] = (u8)((addr >> 8) & 0xFF);
3176 mac[3] = (u8)((addr >> 16) & 0xFF);
3177 /* Use the OUI from the current MAC address */
3178 memcpy(mac, adapter->netdev->dev_addr, 3);
3179}
3180
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003181/*
3182 * Generate a seed MAC address from the PF MAC Address using jhash.
3183 * MAC Address for VFs are assigned incrementally starting from the seed.
3184 * These addresses are programmed in the ASIC by the PF and the VF driver
3185 * queries for the MAC address during its probe.
3186 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	/* Seed MAC derived from PF MAC; incremented per VF below */
	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs the MAC via pmac_add; newer chips use
		 * the set_mac cmd
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address in sequence */
		mac[5] += 1;
	}
	return status;
}
3216
Sathya Perla4c876612013-02-03 20:30:11 +00003217static int be_vfs_mac_query(struct be_adapter *adapter)
3218{
3219 int status, vf;
3220 u8 mac[ETH_ALEN];
3221 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003222
3223 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303224 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3225 mac, vf_cfg->if_handle,
3226 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003227 if (status)
3228 return status;
3229 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3230 }
3231 return 0;
3232}
3233
/* Tear down SR-IOV: disable SR-IOV and destroy each VF's MAC and
 * interface in FW, then free the vf_cfg array. If any VF is still
 * assigned to a VM, SR-IOV is left enabled and only the bookkeeping
 * is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the MAC via pmac_del; newer chips clear it
		 * via set_mac with a NULL address
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3262
Sathya Perla77071332013-08-27 16:57:34 +05303263static void be_clear_queues(struct be_adapter *adapter)
3264{
3265 be_mcc_queues_destroy(adapter);
3266 be_rx_cqs_destroy(adapter);
3267 be_tx_queues_destroy(adapter);
3268 be_evt_queues_destroy(adapter);
3269}
3270
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303271static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003272{
Sathya Perla191eb752012-02-23 18:50:13 +00003273 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3274 cancel_delayed_work_sync(&adapter->work);
3275 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3276 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303277}
3278
Somnath Koturb05004a2013-12-05 12:08:16 +05303279static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303280{
Somnath Koturb05004a2013-12-05 12:08:16 +05303281 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003282 be_cmd_pmac_del(adapter, adapter->if_handle,
3283 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303284 kfree(adapter->pmac_id);
3285 adapter->pmac_id = NULL;
3286 }
3287}
3288
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303289#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303290static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3291{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003292 struct net_device *netdev = adapter->netdev;
3293
Sathya Perlac9c47142014-03-27 10:46:19 +05303294 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3295 be_cmd_manage_iface(adapter, adapter->if_handle,
3296 OP_CONVERT_TUNNEL_TO_NORMAL);
3297
3298 if (adapter->vxlan_port)
3299 be_cmd_set_vxlan_port(adapter, 0);
3300
3301 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3302 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003303
3304 netdev->hw_enc_features = 0;
3305 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303306 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303307}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303308#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303309
Somnath Koturb05004a2013-12-05 12:08:16 +05303310static int be_clear(struct be_adapter *adapter)
3311{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303312 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003313
Sathya Perla11ac75e2011-12-13 00:58:50 +00003314 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003315 be_vf_clear(adapter);
3316
Vasundhara Volambec84e62014-06-30 13:01:32 +05303317 /* Re-configure FW to distribute resources evenly across max-supported
3318 * number of VFs, only when VFs are not already enabled.
3319 */
3320 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3321 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3322 pci_sriov_get_totalvfs(adapter->pdev));
3323
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303324#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303325 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303326#endif
Sathya Perla2d17f402013-07-23 15:25:04 +05303327 /* delete the primary mac along with the uc-mac list */
Somnath Koturb05004a2013-12-05 12:08:16 +05303328 be_mac_clear(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003329
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003330 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003331
Sathya Perla77071332013-08-27 16:57:34 +05303332 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003333
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003334 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303335 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003336 return 0;
3337}
3338
Kalesh AP0700d812015-01-20 03:51:43 -05003339static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3340 u32 cap_flags, u32 vf)
3341{
3342 u32 en_flags;
3343 int status;
3344
3345 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3346 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3347 BE_IF_FLAGS_RSS;
3348
3349 en_flags &= cap_flags;
3350
3351 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3352 if_handle, vf);
3353
3354 return status;
3355}
3356
Sathya Perla4c876612013-02-03 20:30:11 +00003357static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003358{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303359 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003360 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003361 u32 cap_flags, vf;
3362 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003363
Kalesh AP0700d812015-01-20 03:51:43 -05003364 /* If a FW profile exists, then cap_flags are updated */
Sathya Perla4c876612013-02-03 20:30:11 +00003365 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3366 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003367
Sathya Perla4c876612013-02-03 20:30:11 +00003368 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303369 if (!BE3_chip(adapter)) {
3370 status = be_cmd_get_profile_config(adapter, &res,
3371 vf + 1);
3372 if (!status)
3373 cap_flags = res.if_cap_flags;
3374 }
Sathya Perla4c876612013-02-03 20:30:11 +00003375
Kalesh AP0700d812015-01-20 03:51:43 -05003376 status = be_if_create(adapter, &vf_cfg->if_handle,
3377 cap_flags, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003378 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003379 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003380 }
Kalesh AP0700d812015-01-20 03:51:43 -05003381
3382 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003383}
3384
Sathya Perla39f1d942012-05-08 19:41:24 +00003385static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003386{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003387 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003388 int vf;
3389
Sathya Perla39f1d942012-05-08 19:41:24 +00003390 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3391 GFP_KERNEL);
3392 if (!adapter->vf_cfg)
3393 return -ENOMEM;
3394
Sathya Perla11ac75e2011-12-13 00:58:50 +00003395 for_all_vfs(adapter, vf_cfg, vf) {
3396 vf_cfg->if_handle = -1;
3397 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003398 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003399 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003400}
3401
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003402static int be_vf_setup(struct be_adapter *adapter)
3403{
Sathya Perla4c876612013-02-03 20:30:11 +00003404 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303405 struct be_vf_cfg *vf_cfg;
3406 int status, old_vfs, vf;
Sathya Perla04a06022013-07-23 15:25:00 +05303407 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003408
Sathya Perla257a3fe2013-06-14 15:54:51 +05303409 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003410
3411 status = be_vf_setup_init(adapter);
3412 if (status)
3413 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003414
Sathya Perla4c876612013-02-03 20:30:11 +00003415 if (old_vfs) {
3416 for_all_vfs(adapter, vf_cfg, vf) {
3417 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3418 if (status)
3419 goto err;
3420 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003421
Sathya Perla4c876612013-02-03 20:30:11 +00003422 status = be_vfs_mac_query(adapter);
3423 if (status)
3424 goto err;
3425 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303426 status = be_vfs_if_create(adapter);
3427 if (status)
3428 goto err;
3429
Sathya Perla39f1d942012-05-08 19:41:24 +00003430 status = be_vf_eth_addr_config(adapter);
3431 if (status)
3432 goto err;
3433 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003434
Sathya Perla11ac75e2011-12-13 00:58:50 +00003435 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303436 /* Allow VFs to programs MAC/VLAN filters */
3437 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3438 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3439 status = be_cmd_set_fn_privileges(adapter,
3440 privileges |
3441 BE_PRIV_FILTMGMT,
3442 vf + 1);
3443 if (!status)
3444 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3445 vf);
3446 }
3447
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303448 /* Allow full available bandwidth */
3449 if (!old_vfs)
3450 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003451
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303452 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303453 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303454 be_cmd_set_logical_link_config(adapter,
3455 IFLA_VF_LINK_STATE_AUTO,
3456 vf+1);
3457 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003458 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003459
3460 if (!old_vfs) {
3461 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3462 if (status) {
3463 dev_err(dev, "SRIOV enable failed\n");
3464 adapter->num_vfs = 0;
3465 goto err;
3466 }
3467 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303468
3469 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003470 return 0;
3471err:
Sathya Perla4c876612013-02-03 20:30:11 +00003472 dev_err(dev, "VF setup failed\n");
3473 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003474 return status;
3475}
3476
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303477/* Converting function_mode bits on BE3 to SH mc_type enums */
3478
3479static u8 be_convert_mc_type(u32 function_mode)
3480{
Suresh Reddy66064db2014-06-23 16:41:29 +05303481 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303482 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303483 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303484 return FLEX10;
3485 else if (function_mode & VNIC_MODE)
3486 return vNIC2;
3487 else if (function_mode & UMC_ENABLED)
3488 return UMC;
3489 else
3490 return MC_NONE;
3491}
3492
Sathya Perla92bf14a2013-08-27 16:57:32 +05303493/* On BE2/BE3 FW does not suggest the supported limits */
3494static void BEx_get_resources(struct be_adapter *adapter,
3495 struct be_resources *res)
3496{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303497 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303498
3499 if (be_physfn(adapter))
3500 res->max_uc_mac = BE_UC_PMAC_COUNT;
3501 else
3502 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3503
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303504 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3505
3506 if (be_is_mc(adapter)) {
3507 /* Assuming that there are 4 channels per port,
3508 * when multi-channel is enabled
3509 */
3510 if (be_is_qnq_mode(adapter))
3511 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3512 else
3513 /* In a non-qnq multichannel mode, the pvid
3514 * takes up one vlan entry
3515 */
3516 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3517 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303518 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303519 }
3520
Sathya Perla92bf14a2013-08-27 16:57:32 +05303521 res->max_mcast_mac = BE_MAX_MC;
3522
Vasundhara Volama5243da2014-03-11 18:53:07 +05303523 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3524 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3525 * *only* if it is RSS-capable.
3526 */
3527 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3528 !be_physfn(adapter) || (be_is_mc(adapter) &&
Suresh Reddya28277d2014-09-02 09:56:57 +05303529 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303530 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05303531 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3532 struct be_resources super_nic_res = {0};
3533
3534 /* On a SuperNIC profile, the driver needs to use the
3535 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3536 */
3537 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3538 /* Some old versions of BE3 FW don't report max_tx_qs value */
3539 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3540 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303541 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05303542 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303543
3544 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3545 !use_sriov && be_physfn(adapter))
3546 res->max_rss_qs = (adapter->be3_native) ?
3547 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3548 res->max_rx_qs = res->max_rss_qs + 1;
3549
Suresh Reddye3dc8672014-01-06 13:02:25 +05303550 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05303551 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05303552 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3553 else
3554 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303555
3556 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3557 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3558 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3559}
3560
Sathya Perla30128032011-11-10 19:17:57 +00003561static void be_setup_init(struct be_adapter *adapter)
3562{
3563 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003564 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003565 adapter->if_handle = -1;
3566 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003567 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003568 if (be_physfn(adapter))
3569 adapter->cmd_privileges = MAX_PRIVILEGES;
3570 else
3571 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003572}
3573
Vasundhara Volambec84e62014-06-30 13:01:32 +05303574static int be_get_sriov_config(struct be_adapter *adapter)
3575{
3576 struct device *dev = &adapter->pdev->dev;
3577 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303578 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303579
3580 /* Some old versions of BE3 FW don't report max_vfs value */
Sathya Perlad3d18312014-08-01 17:47:30 +05303581 be_cmd_get_profile_config(adapter, &res, 0);
3582
Vasundhara Volambec84e62014-06-30 13:01:32 +05303583 if (BE3_chip(adapter) && !res.max_vfs) {
3584 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3585 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3586 }
3587
Sathya Perlad3d18312014-08-01 17:47:30 +05303588 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303589
3590 if (!be_max_vfs(adapter)) {
3591 if (num_vfs)
Vasundhara Volam50762662014-09-12 17:39:14 +05303592 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
Vasundhara Volambec84e62014-06-30 13:01:32 +05303593 adapter->num_vfs = 0;
3594 return 0;
3595 }
3596
Sathya Perlad3d18312014-08-01 17:47:30 +05303597 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3598
Vasundhara Volambec84e62014-06-30 13:01:32 +05303599 /* validate num_vfs module param */
3600 old_vfs = pci_num_vf(adapter->pdev);
3601 if (old_vfs) {
3602 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3603 if (old_vfs != num_vfs)
3604 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3605 adapter->num_vfs = old_vfs;
3606 } else {
3607 if (num_vfs > be_max_vfs(adapter)) {
3608 dev_info(dev, "Resources unavailable to init %d VFs\n",
3609 num_vfs);
3610 dev_info(dev, "Limiting to %d VFs\n",
3611 be_max_vfs(adapter));
3612 }
3613 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3614 }
3615
3616 return 0;
3617}
3618
Sathya Perla92bf14a2013-08-27 16:57:32 +05303619static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003620{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303621 struct device *dev = &adapter->pdev->dev;
3622 struct be_resources res = {0};
3623 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003624
Sathya Perla92bf14a2013-08-27 16:57:32 +05303625 if (BEx_chip(adapter)) {
3626 BEx_get_resources(adapter, &res);
3627 adapter->res = res;
3628 }
3629
Sathya Perla92bf14a2013-08-27 16:57:32 +05303630 /* For Lancer, SH etc read per-function resource limits from FW.
3631 * GET_FUNC_CONFIG returns per function guaranteed limits.
3632 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3633 */
Sathya Perla4c876612013-02-03 20:30:11 +00003634 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303635 status = be_cmd_get_func_config(adapter, &res);
3636 if (status)
3637 return status;
3638
3639 /* If RoCE may be enabled stash away half the EQs for RoCE */
3640 if (be_roce_supported(adapter))
3641 res.max_evt_qs /= 2;
3642 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003643 }
3644
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303645 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3646 be_max_txqs(adapter), be_max_rxqs(adapter),
3647 be_max_rss(adapter), be_max_eqs(adapter),
3648 be_max_vfs(adapter));
3649 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3650 be_max_uc(adapter), be_max_mc(adapter),
3651 be_max_vlans(adapter));
3652
Sathya Perla92bf14a2013-08-27 16:57:32 +05303653 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003654}
3655
Sathya Perlad3d18312014-08-01 17:47:30 +05303656static void be_sriov_config(struct be_adapter *adapter)
3657{
3658 struct device *dev = &adapter->pdev->dev;
3659 int status;
3660
3661 status = be_get_sriov_config(adapter);
3662 if (status) {
3663 dev_err(dev, "Failed to query SR-IOV configuration\n");
3664 dev_err(dev, "SR-IOV cannot be enabled\n");
3665 return;
3666 }
3667
3668 /* When the HW is in SRIOV capable configuration, the PF-pool
3669 * resources are equally distributed across the max-number of
3670 * VFs. The user may request only a subset of the max-vfs to be
3671 * enabled. Based on num_vfs, redistribute the resources across
3672 * num_vfs so that each VF will have access to more number of
3673 * resources. This facility is not available in BE3 FW.
3674 * Also, this is done by FW in Lancer chip.
3675 */
3676 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3677 status = be_cmd_set_sriov_config(adapter,
3678 adapter->pool_res,
3679 adapter->num_vfs);
3680 if (status)
3681 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3682 }
3683}
3684
Sathya Perla39f1d942012-05-08 19:41:24 +00003685static int be_get_config(struct be_adapter *adapter)
3686{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303687 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003688 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003689
Kalesh APe97e3cd2014-07-17 16:20:26 +05303690 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003691 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303692 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003693
Vasundhara Volam21252372015-02-06 08:18:42 -05003694 be_cmd_query_port_name(adapter);
3695
3696 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05303697 status = be_cmd_get_active_profile(adapter, &profile_id);
3698 if (!status)
3699 dev_info(&adapter->pdev->dev,
3700 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05303701 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303702
Sathya Perlad3d18312014-08-01 17:47:30 +05303703 if (!BE2_chip(adapter) && be_physfn(adapter))
3704 be_sriov_config(adapter);
Vasundhara Volam542963b2014-01-15 13:23:33 +05303705
Sathya Perla92bf14a2013-08-27 16:57:32 +05303706 status = be_get_resources(adapter);
3707 if (status)
3708 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003709
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05303710 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3711 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303712 if (!adapter->pmac_id)
3713 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003714
Sathya Perla92bf14a2013-08-27 16:57:32 +05303715 /* Sanitize cfg_num_qs based on HW and platform limits */
3716 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3717
3718 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003719}
3720
Sathya Perla95046b92013-07-23 15:25:02 +05303721static int be_mac_setup(struct be_adapter *adapter)
3722{
3723 u8 mac[ETH_ALEN];
3724 int status;
3725
3726 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3727 status = be_cmd_get_perm_mac(adapter, mac);
3728 if (status)
3729 return status;
3730
3731 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3732 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3733 } else {
3734 /* Maybe the HW was reset; dev_addr must be re-programmed */
3735 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3736 }
3737
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003738 /* For BE3-R VFs, the PF programs the initial MAC address */
3739 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3740 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3741 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303742 return 0;
3743}
3744
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303745static void be_schedule_worker(struct be_adapter *adapter)
3746{
3747 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3748 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3749}
3750
Sathya Perla77071332013-08-27 16:57:34 +05303751static int be_setup_queues(struct be_adapter *adapter)
3752{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303753 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303754 int status;
3755
3756 status = be_evt_queues_create(adapter);
3757 if (status)
3758 goto err;
3759
3760 status = be_tx_qs_create(adapter);
3761 if (status)
3762 goto err;
3763
3764 status = be_rx_cqs_create(adapter);
3765 if (status)
3766 goto err;
3767
3768 status = be_mcc_queues_create(adapter);
3769 if (status)
3770 goto err;
3771
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303772 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3773 if (status)
3774 goto err;
3775
3776 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3777 if (status)
3778 goto err;
3779
Sathya Perla77071332013-08-27 16:57:34 +05303780 return 0;
3781err:
3782 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3783 return status;
3784}
3785
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303786int be_update_queues(struct be_adapter *adapter)
3787{
3788 struct net_device *netdev = adapter->netdev;
3789 int status;
3790
3791 if (netif_running(netdev))
3792 be_close(netdev);
3793
3794 be_cancel_worker(adapter);
3795
3796 /* If any vectors have been shared with RoCE we cannot re-program
3797 * the MSIx table.
3798 */
3799 if (!adapter->num_msix_roce_vec)
3800 be_msix_disable(adapter);
3801
3802 be_clear_queues(adapter);
3803
3804 if (!msix_enabled(adapter)) {
3805 status = be_msix_enable(adapter);
3806 if (status)
3807 return status;
3808 }
3809
3810 status = be_setup_queues(adapter);
3811 if (status)
3812 return status;
3813
3814 be_schedule_worker(adapter);
3815
3816 if (netif_running(netdev))
3817 status = be_open(netdev);
3818
3819 return status;
3820}
3821
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003822static inline int fw_major_num(const char *fw_ver)
3823{
3824 int fw_major = 0, i;
3825
3826 i = sscanf(fw_ver, "%d.", &fw_major);
3827 if (i != 1)
3828 return 0;
3829
3830 return fw_major;
3831}
3832
Sathya Perla5fb379e2009-06-18 00:02:59 +00003833static int be_setup(struct be_adapter *adapter)
3834{
Sathya Perla39f1d942012-05-08 19:41:24 +00003835 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003836 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003837
Sathya Perla30128032011-11-10 19:17:57 +00003838 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003839
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003840 if (!lancer_chip(adapter))
3841 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003842
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003843 status = be_get_config(adapter);
3844 if (status)
3845 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003846
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003847 status = be_msix_enable(adapter);
3848 if (status)
3849 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003850
Kalesh AP0700d812015-01-20 03:51:43 -05003851 status = be_if_create(adapter, &adapter->if_handle,
3852 be_if_cap_flags(adapter), 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003853 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003854 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003855
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303856 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3857 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303858 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303859 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003860 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003861 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003862
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003863 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003864
Sathya Perla95046b92013-07-23 15:25:02 +05303865 status = be_mac_setup(adapter);
3866 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003867 goto err;
3868
Kalesh APe97e3cd2014-07-17 16:20:26 +05303869 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303870 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003871
Somnath Koture9e2a902013-10-24 14:37:53 +05303872 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05303873 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05303874 adapter->fw_ver);
3875 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3876 }
3877
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003878 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003879 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003880
3881 be_set_rx_mode(adapter->netdev);
3882
Suresh Reddy76a9e082014-01-15 13:23:40 +05303883 be_cmd_get_acpi_wol_cap(adapter);
3884
Kalesh AP00d594c2015-01-20 03:51:44 -05003885 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
3886 adapter->rx_fc);
3887 if (status)
3888 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
3889 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003890
Kalesh AP00d594c2015-01-20 03:51:44 -05003891 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
3892 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003893
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303894 if (be_physfn(adapter))
3895 be_cmd_set_logical_link_config(adapter,
3896 IFLA_VF_LINK_STATE_AUTO, 0);
3897
Vasundhara Volambec84e62014-06-30 13:01:32 +05303898 if (adapter->num_vfs)
3899 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003900
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003901 status = be_cmd_get_phy_info(adapter);
3902 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003903 adapter->phy.fc_autoneg = 1;
3904
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303905 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303906 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003907 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003908err:
3909 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003910 return status;
3911}
3912
Ivan Vecera66268732011-12-08 01:31:21 +00003913#ifdef CONFIG_NET_POLL_CONTROLLER
3914static void be_netpoll(struct net_device *netdev)
3915{
3916 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003917 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003918 int i;
3919
Sathya Perlae49cc342012-11-27 19:50:02 +00003920 for_all_evt_queues(adapter, eqo, i) {
3921 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3922 napi_schedule(&eqo->napi);
3923 }
Ivan Vecera66268732011-12-08 01:31:21 +00003924}
3925#endif
3926
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303927static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003928
Sathya Perla306f1342011-08-02 19:57:45 +00003929static bool phy_flashing_required(struct be_adapter *adapter)
3930{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05003931 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003932 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003933}
3934
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003935static bool is_comp_in_ufi(struct be_adapter *adapter,
3936 struct flash_section_info *fsec, int type)
3937{
3938 int i = 0, img_type = 0;
3939 struct flash_section_info_g2 *fsec_g2 = NULL;
3940
Sathya Perlaca34fe32012-11-06 17:48:56 +00003941 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003942 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3943
3944 for (i = 0; i < MAX_FLASH_COMP; i++) {
3945 if (fsec_g2)
3946 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3947 else
3948 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3949
3950 if (img_type == type)
3951 return true;
3952 }
3953 return false;
3954
3955}
3956
Jingoo Han4188e7d2013-08-05 18:02:02 +09003957static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303958 int header_size,
3959 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003960{
3961 struct flash_section_info *fsec = NULL;
3962 const u8 *p = fw->data;
3963
3964 p += header_size;
3965 while (p < (fw->data + fw->size)) {
3966 fsec = (struct flash_section_info *)p;
3967 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3968 return fsec;
3969 p += 32;
3970 }
3971 return NULL;
3972}
3973
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303974static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3975 u32 img_offset, u32 img_size, int hdr_size,
3976 u16 img_optype, bool *crc_match)
3977{
3978 u32 crc_offset;
3979 int status;
3980 u8 crc[4];
3981
Vasundhara Volam70a7b522015-02-06 08:18:39 -05003982 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
3983 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303984 if (status)
3985 return status;
3986
3987 crc_offset = hdr_size + img_offset + img_size - 4;
3988
3989 /* Skip flashing, if crc of flashed region matches */
3990 if (!memcmp(crc, p + crc_offset, 4))
3991 *crc_match = true;
3992 else
3993 *crc_match = false;
3994
3995 return status;
3996}
3997
/* Write one image section to the adapter's flash in chunks of up to 32KB.
 * @img points at the section data within the UFI file; @img_offset plus
 * the running byte count is passed to each write-flashrom command.
 * Returns 0 on success or the failing command's status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		/* At most 32KB of data per write-flashrom command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* All chunks but the last use the SAVE operation; the final
		 * chunk uses the FLASH operation.
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		/* For PHY FW, an ILLEGAL_REQUEST status is not treated as
		 * fatal: the loop is exited and 0 is returned. Any other
		 * non-zero status fails the flash.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4038
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004039/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004040static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304041 const struct firmware *fw,
4042 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004043{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004044 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304045 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004046 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304047 int status, i, filehdr_size, num_comp;
4048 const struct flash_comp *pflashcomp;
4049 bool crc_match;
4050 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004051
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004052 struct flash_comp gen3_flash_types[] = {
4053 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4054 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4055 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4056 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4057 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4058 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4059 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4060 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4061 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4062 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4063 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4064 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4065 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4066 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4067 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4068 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4069 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4070 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4071 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4072 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004073 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004074
4075 struct flash_comp gen2_flash_types[] = {
4076 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4077 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4078 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4079 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4080 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4081 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4082 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4083 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4084 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4085 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4086 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4087 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4088 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4089 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4090 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4091 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004092 };
4093
Sathya Perlaca34fe32012-11-06 17:48:56 +00004094 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004095 pflashcomp = gen3_flash_types;
4096 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004097 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004098 } else {
4099 pflashcomp = gen2_flash_types;
4100 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004101 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004102 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004103 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004104
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004105 /* Get flash section info*/
4106 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4107 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304108 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004109 return -1;
4110 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004111 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004112 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004113 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004114
4115 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4116 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4117 continue;
4118
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004119 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4120 !phy_flashing_required(adapter))
4121 continue;
4122
4123 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304124 status = be_check_flash_crc(adapter, fw->data,
4125 pflashcomp[i].offset,
4126 pflashcomp[i].size,
4127 filehdr_size +
4128 img_hdrs_size,
4129 OPTYPE_REDBOOT, &crc_match);
4130 if (status) {
4131 dev_err(dev,
4132 "Could not get CRC for 0x%x region\n",
4133 pflashcomp[i].optype);
4134 continue;
4135 }
4136
4137 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004138 continue;
4139 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004140
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304141 p = fw->data + filehdr_size + pflashcomp[i].offset +
4142 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004143 if (p + pflashcomp[i].size > fw->data + fw->size)
4144 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004145
4146 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004147 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004148 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304149 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004150 pflashcomp[i].img_type);
4151 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004152 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004153 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004154 return 0;
4155}
4156
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304157static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4158{
4159 u32 img_type = le32_to_cpu(fsec_entry.type);
4160 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4161
4162 if (img_optype != 0xFFFF)
4163 return img_optype;
4164
4165 switch (img_type) {
4166 case IMAGE_FIRMWARE_iSCSI:
4167 img_optype = OPTYPE_ISCSI_ACTIVE;
4168 break;
4169 case IMAGE_BOOT_CODE:
4170 img_optype = OPTYPE_REDBOOT;
4171 break;
4172 case IMAGE_OPTION_ROM_ISCSI:
4173 img_optype = OPTYPE_BIOS;
4174 break;
4175 case IMAGE_OPTION_ROM_PXE:
4176 img_optype = OPTYPE_PXE_BIOS;
4177 break;
4178 case IMAGE_OPTION_ROM_FCoE:
4179 img_optype = OPTYPE_FCOE_BIOS;
4180 break;
4181 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4182 img_optype = OPTYPE_ISCSI_BACKUP;
4183 break;
4184 case IMAGE_NCSI:
4185 img_optype = OPTYPE_NCSI_FW;
4186 break;
4187 case IMAGE_FLASHISM_JUMPVECTOR:
4188 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4189 break;
4190 case IMAGE_FIRMWARE_PHY:
4191 img_optype = OPTYPE_SH_PHY_FW;
4192 break;
4193 case IMAGE_REDBOOT_DIR:
4194 img_optype = OPTYPE_REDBOOT_DIR;
4195 break;
4196 case IMAGE_REDBOOT_CONFIG:
4197 img_optype = OPTYPE_REDBOOT_CONFIG;
4198 break;
4199 case IMAGE_UFI_DIR:
4200 img_optype = OPTYPE_UFI_DIR;
4201 break;
4202 default:
4203 break;
4204 }
4205
4206 return img_optype;
4207}
4208
/* Flash the sections of a UFI image onto a Skyhawk adapter.
 * Flashing starts in OFFSET-based mode (OPTYPE_OFFSET_SPECIFIED with an
 * explicit flash offset per command). If the FW on the card rejects that
 * mode, the whole section loop is restarted in the older per-section
 * OPTYPE-based mode (see the retry_flash label).
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* Entries with optype 0xFFFF in the file come from old FW
		 * images and get relaxed error handling below.
		 */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* be_get_img_optype() could not map the image type: skip */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Region in flash already matches the image: skip it */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds-check the section against the image size */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4319
/* Download a firmware image to a Lancer adapter: stream the image in
 * 32KB chunks through the write-object command into a DMA buffer, then
 * commit it with a zero-length write. Depending on the FW's reported
 * change_status, the adapter is reset in-place or the user is told to
 * reboot the server to activate the new image.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command moves whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the request header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually consumed, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			/* Reset failed; flash succeeded, so still return 0
			 * and ask the user to reboot instead.
			 */
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4404
/* UFI (flash image file) type codes, derived from the image's build
 * string and ASIC revision; see be_get_ufi_type() below.
 */
#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004410
Sathya Perlaca34fe32012-11-06 17:48:56 +00004411static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004412 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004413{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004414 if (!fhdr) {
4415 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4416 return -1;
4417 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004418
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004419 /* First letter of the build version is used to identify
4420 * which chip this image file is meant for.
4421 */
4422 switch (fhdr->build[0]) {
4423 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004424 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4425 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004426 case BLD_STR_UFI_TYPE_BE3:
4427 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4428 BE3_UFI;
4429 case BLD_STR_UFI_TYPE_BE2:
4430 return BE2_UFI;
4431 default:
4432 return -1;
4433 }
4434}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004435
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004436/* Check if the flash image file is compatible with the adapter that
4437 * is being flashed.
4438 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004439 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004440 */
4441static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4442 struct flash_file_hdr_g3 *fhdr)
4443{
4444 int ufi_type = be_get_ufi_type(adapter, fhdr);
4445
4446 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004447 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004448 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004449 case SH_UFI:
4450 return (skyhawk_chip(adapter) &&
4451 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004452 case BE3R_UFI:
4453 return BE3_chip(adapter);
4454 case BE3_UFI:
4455 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4456 case BE2_UFI:
4457 return BE2_chip(adapter);
4458 default:
4459 return false;
4460 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004461}
4462
/* Download a (non-Lancer) UFI image: verify the image is compatible with
 * this chip, then flash each image in the file via the chip-specific
 * helper (be_flash_skyhawk() or be_flash_BEx()).
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	/* DMA buffer reused for every write-flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On non-BE2 chips only the image with id 1 is flashed */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
4506
4507int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4508{
4509 const struct firmware *fw;
4510 int status;
4511
4512 if (!netif_running(adapter->netdev)) {
4513 dev_err(&adapter->pdev->dev,
4514 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304515 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004516 }
4517
4518 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4519 if (status)
4520 goto fw_exit;
4521
4522 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4523
4524 if (lancer_chip(adapter))
4525 status = lancer_fw_download(adapter, fw);
4526 else
4527 status = be_fw_download(adapter, fw);
4528
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004529 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304530 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004531
Ajit Khaparde84517482009-09-04 03:12:16 +00004532fw_exit:
4533 release_firmware(fw);
4534 return status;
4535}
4536
/* ndo_bridge_setlink handler: program the embedded switch into VEB or
 * VEPA port-forwarding mode as requested via the IFLA_BRIDGE_MODE
 * netlink attribute. Only supported when SR-IOV is enabled.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	/* Only the first IFLA_BRIDGE_MODE attribute is acted upon; the
	 * function returns from inside the loop once it is processed.
	 */
	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	/* Reached only when be_cmd_set_hsw_config() failed, or when no
	 * IFLA_BRIDGE_MODE attribute was found (status still 0 then).
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4583
4584static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304585 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004586{
4587 struct be_adapter *adapter = netdev_priv(dev);
4588 int status = 0;
4589 u8 hsw_mode;
4590
4591 if (!sriov_enabled(adapter))
4592 return 0;
4593
4594 /* BE and Lancer chips support VEB mode only */
4595 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4596 hsw_mode = PORT_FWD_TYPE_VEB;
4597 } else {
4598 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4599 adapter->if_handle, &hsw_mode);
4600 if (status)
4601 return 0;
4602 }
4603
4604 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4605 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004606 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4607 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004608}
4609
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304610#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004611/* VxLAN offload Notes:
4612 *
4613 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4614 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4615 * is expected to work across all types of IP tunnels once exported. Skyhawk
4616 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304617 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4618 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4619 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004620 *
4621 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4622 * adds more than one port, disable offloads and don't re-enable them again
4623 * until after all the tunnels are removed.
4624 */
/* vxlan_add_port callback: enable VxLAN offloads for the first (and only
 * supported) UDP port. vxlan_port_count tracks every add request — even
 * ones that disable offloads — so that be_del_vxlan_port() can tell when
 * all tunnels are gone.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* VxLAN offloads are not supported on Lancer and BEx chips */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* A second distinct port while offloads are active: disable
	 * offloads entirely (only one UDP dport can be offloaded).
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads were already disabled due to multiple ports; just
	 * keep counting until all ports are removed.
	 */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Advertise tunnel offload features now that HW is configured */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4673
4674static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4675 __be16 port)
4676{
4677 struct be_adapter *adapter = netdev_priv(netdev);
4678
4679 if (lancer_chip(adapter) || BEx_chip(adapter))
4680 return;
4681
4682 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004683 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304684
4685 be_disable_vxlan_offloads(adapter);
4686
4687 dev_info(&adapter->pdev->dev,
4688 "Disabled VxLAN offloads for UDP port %d\n",
4689 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004690done:
4691 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304692}
Joe Stringer725d5482014-11-13 16:38:13 -08004693
/* ndo_features_check handler: strip checksum/GSO offload features from
 * encapsulated packets that are not VxLAN, since tunnel offloads have
 * been enabled only for VxLAN (see be_add_vxlan_port()).
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Neither IPv4 nor IPv6: leave features untouched */
		return features;
	}

	/* A VxLAN packet here is UDP, Ethernet-in-TEB, with exactly a
	 * UDP + VxLAN header between transport and inner MAC headers;
	 * anything else loses checksum and GSO offloads.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304734#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304735
/* Net-device entry points the networking core invokes on a be2net port */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF administration (PF only) */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN UDP port tracking plus per-skb tunnel-offload policing */
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
};
4766
4767static void be_netdev_init(struct net_device *netdev)
4768{
4769 struct be_adapter *adapter = netdev_priv(netdev);
4770
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004771 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004772 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004773 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004774 if (be_multi_rxq(adapter))
4775 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004776
4777 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004778 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004779
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004780 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004781 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004782
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004783 netdev->priv_flags |= IFF_UNICAST_FLT;
4784
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004785 netdev->flags |= IFF_MULTICAST;
4786
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004787 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004788
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004789 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004790
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004791 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004792}
4793
4794static void be_unmap_pci_bars(struct be_adapter *adapter)
4795{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004796 if (adapter->csr)
4797 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004798 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004799 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004800}
4801
/* BAR number holding the doorbell region: 0 on Lancer and on VFs,
 * 4 on BE-family physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4809
4810static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004811{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004812 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004813 adapter->roce_db.size = 4096;
4814 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4815 db_bar(adapter));
4816 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4817 db_bar(adapter));
4818 }
Parav Pandit045508a2012-03-26 14:27:13 +00004819 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004820}
4821
4822static int be_map_pci_bars(struct be_adapter *adapter)
4823{
4824 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004825
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004826 if (BEx_chip(adapter) && be_physfn(adapter)) {
4827 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304828 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004829 return -ENOMEM;
4830 }
4831
Sathya Perlace66f782012-11-06 17:48:58 +00004832 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304833 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004834 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004835 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004836
4837 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004838 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004839
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004840pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304841 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004842 be_unmap_pci_bars(adapter);
4843 return -ENOMEM;
4844}
4845
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004846static void be_ctrl_cleanup(struct be_adapter *adapter)
4847{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004848 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004849
4850 be_unmap_pci_bars(adapter);
4851
4852 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004853 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4854 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004855
Sathya Perla5b8821b2011-08-02 19:57:44 +00004856 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004857 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004858 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4859 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004860}
4861
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004862static int be_ctrl_init(struct be_adapter *adapter)
4863{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004864 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4865 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004866 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004867 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004868 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004869
Sathya Perlace66f782012-11-06 17:48:58 +00004870 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4871 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4872 SLI_INTF_FAMILY_SHIFT;
4873 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4874
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004875 status = be_map_pci_bars(adapter);
4876 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004877 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004878
4879 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004880 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4881 mbox_mem_alloc->size,
4882 &mbox_mem_alloc->dma,
4883 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004884 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004885 status = -ENOMEM;
4886 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004887 }
4888 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4889 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4890 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4891 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004892
Sathya Perla5b8821b2011-08-02 19:57:44 +00004893 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa2013-08-26 22:45:23 -07004894 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4895 rx_filter->size, &rx_filter->dma,
4896 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304897 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004898 status = -ENOMEM;
4899 goto free_mbox;
4900 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004901
Ivan Vecera29849612010-12-14 05:43:19 +00004902 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004903 spin_lock_init(&adapter->mcc_lock);
4904 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004905
Suresh Reddy5eeff632014-01-06 13:02:24 +05304906 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004907 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004908 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004909
4910free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004911 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4912 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004913
4914unmap_pci_bars:
4915 be_unmap_pci_bars(adapter);
4916
4917done:
4918 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004919}
4920
4921static void be_stats_cleanup(struct be_adapter *adapter)
4922{
Sathya Perla3abcded2010-10-03 22:12:27 -07004923 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004924
4925 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004926 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4927 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004928}
4929
4930static int be_stats_init(struct be_adapter *adapter)
4931{
Sathya Perla3abcded2010-10-03 22:12:27 -07004932 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004933
Sathya Perlaca34fe32012-11-06 17:48:56 +00004934 if (lancer_chip(adapter))
4935 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4936 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004937 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004938 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004939 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004940 else
4941 /* ALL non-BE ASICs */
4942 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004943
Joe Perchesede23fa2013-08-26 22:45:23 -07004944 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4945 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304946 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304947 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004948 return 0;
4949}
4950
/* PCI remove callback: tears the adapter down in the reverse order of
 * be_probe().  Tolerates a NULL drvdata (probe failed before it was set).
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Detach the RoCE ULP before NIC resources go away */
	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery poller before dismantling what it touches */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4981
Sathya Perla39f1d942012-05-08 19:41:24 +00004982static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004983{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304984 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004985
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004986 status = be_cmd_get_cntl_attributes(adapter);
4987 if (status)
4988 return status;
4989
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004990 /* Must be a power of 2 or else MODULO will BUG_ON */
4991 adapter->be_get_temp_freq = 64;
4992
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304993 if (BEx_chip(adapter)) {
4994 level = be_cmd_get_fw_log_level(adapter);
4995 adapter->msg_enable =
4996 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4997 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004998
Sathya Perla92bf14a2013-08-27 16:57:32 +05304999 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00005000 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005001}
5002
/* Recover a Lancer function after a firmware error: wait for the chip
 * to report ready, rebuild all driver resources, and re-open the
 * interface if it was running.  Returns 0 on success; -EAGAIN means
 * resources are not yet provisioned and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Reset the error state before re-provisioning */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
5039
/* Delayed work that polls for HW errors and, on Lancer, drives function
 * recovery.  Re-arms itself every second unless recovery failed with an
 * error other than -EAGAIN (resource provisioning pending).
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl so the stack stops using the netdev
		 * while the function is re-initialized.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
5065
Vasundhara Volam21252372015-02-06 08:18:42 -05005066static void be_log_sfp_info(struct be_adapter *adapter)
5067{
5068 int status;
5069
5070 status = be_cmd_query_sfp_info(adapter);
5071 if (!status) {
5072 dev_err(&adapter->pdev->dev,
5073 "Unqualified SFP+ detected on %c from %s part no: %s",
5074 adapter->port_name, adapter->phy.vendor_name,
5075 adapter->phy.vendor_pn);
5076 }
5077 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5078}
5079
/* Periodic (1s) housekeeping: reaps MCC completions while interrupts
 * are off, issues stats queries, samples die temperature, replenishes
 * starved RX rings, adapts EQ delays and logs unqualified-SFP events.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects bottom halves disabled */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Sample die temperature every be_get_temp_freq ticks (PF only) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5125
Sathya Perla257a3fe2013-06-14 15:54:51 +05305126/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00005127static bool be_reset_required(struct be_adapter *adapter)
5128{
Sathya Perla257a3fe2013-06-14 15:54:51 +05305129 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00005130}
5131
Sathya Perlad3791422012-09-28 04:39:44 +00005132static char *mc_name(struct be_adapter *adapter)
5133{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305134 char *str = ""; /* default */
5135
5136 switch (adapter->mc_type) {
5137 case UMC:
5138 str = "UMC";
5139 break;
5140 case FLEX10:
5141 str = "FLEX10";
5142 break;
5143 case vNIC1:
5144 str = "vNIC-1";
5145 break;
5146 case nPAR:
5147 str = "nPAR";
5148 break;
5149 case UFP:
5150 str = "UFP";
5151 break;
5152 case vNIC2:
5153 str = "vNIC-2";
5154 break;
5155 default:
5156 str = "";
5157 }
5158
5159 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005160}
5161
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5166
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005167static inline char *nic_name(struct pci_dev *pdev)
5168{
5169 switch (pdev->device) {
5170 case OC_DEVICE_ID1:
5171 return OC_NAME;
5172 case OC_DEVICE_ID2:
5173 return OC_NAME_BE;
5174 case OC_DEVICE_ID3:
5175 case OC_DEVICE_ID4:
5176 return OC_NAME_LANCER;
5177 case BE_DEVICE_ID2:
5178 return BE3_NAME;
5179 case OC_DEVICE_ID5:
5180 case OC_DEVICE_ID6:
5181 return OC_NAME_SH;
5182 default:
5183 return BE_NAME;
5184 }
5185}
5186
/* PCI probe callback: brings up a newly discovered function -- PCI/DMA
 * setup, BAR mapping and mailbox init, FW handshake, resource
 * provisioning -- then registers the net device and kicks off the
 * recovery poller.  Unwinds via the goto ladder on any failure.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled on it */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to enabled in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5306
/* PM suspend callback: arms Wake-on-LAN if enabled, quiesces the
 * interface and releases HW resources before powering the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop the recovery poller before tearing resources down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5331
/* PM resume callback: re-enables the PCI device, resets and
 * re-initializes the NIC function, and restores the interface state
 * that existed before suspend.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* FW must report ready before any command can be issued */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	/* Restart the recovery poller stopped by be_suspend() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5377
/* PCI shutdown callback.  An FLR (via be_cmd_reset_function) will stop
 * BE from DMAing any data, which is what shutdown/kexec requires.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5398
/* EEH (PCI error recovery) callback: a channel error was detected.
 * Detach the netdev, tear down HW resources once, and tell the EEH core
 * whether a slot reset should be attempted.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* The callback can fire more than once; tear down only on the
	 * first transition into the error state.
	 */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5437
/* EEH callback: the slot has been reset.  Re-enable the PCI function,
 * restore its config space and wait for the firmware to come back up.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success so be_eeh_resume() is
 * invoked next, or PCI_ERS_RESULT_DISCONNECT if the device/FW did not
 * recover.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* state saved at probe/resume time */

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear the AER status so future errors are reported afresh */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5464
/* EEH callback: recovery succeeded; bring the interface back to the
 * state it was in before the error (mirrors the probe/resume path:
 * FW reset + init, be_setup(), re-open if it was running, restart the
 * recovery worker and re-attach the netdev).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Re-save config space so a later pci_restore_state() works */
	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	/* No further recovery possible from here; leave the netdev detached */
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5507
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5513
/* PCI driver registration table: probe/remove, PM (suspend/resume),
 * shutdown and EEH error handlers for the device IDs in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5524
5525static int __init be_init_module(void)
5526{
Joe Perches8e95a202009-12-03 07:58:21 +00005527 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5528 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005529 printk(KERN_WARNING DRV_NAME
5530 " : Module param rx_frag_size must be 2048/4096/8192."
5531 " Using 2048\n");
5532 rx_frag_size = 2048;
5533 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005534
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005535 return pci_register_driver(&be_driver);
5536}
5537module_init(be_init_module);
5538
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);