blob: 617038fa92f0483d0599a05ddd33be8b8e16df89 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the Unrecoverable Error status low
 * register, indexed by bit position. The trailing spaces in some entries
 * are intentional and must be preserved (they appear in logged messages).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR */
/* Human-readable names for the bits of the Unrecoverable Error status high
 * register, indexed by bit position; "Unknown" covers the final bit.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
/* ndo_set_mac_address handler. Programs the requested MAC into the FW,
 * deletes the previously programmed one, then confirms the change by
 * querying the currently active MAC back from the FW (the add/del commands
 * can "fail OK" on VFs without FILTMGMT privilege when the PF has already
 * provisioned the MAC). Returns 0 on success, -EADDRNOTAVAIL for an invalid
 * address, -EPERM if the FW did not activate the new MAC, or a FW error.
 * NOTE(review): command ordering here (add -> del old -> query) is
 * deliberate; do not reorder.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
Ajit Khaparde61000862013-10-03 16:16:33 -0500440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530484 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500492}
493
/* Translate the Lancer pport stats response into the chip-agnostic
 * adapter->drv_stats. Only the low 32 bits (*_lo) of the 64-bit pport
 * counters are carried into the 32-bit driver counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address and VLAN filter drops separately; fold them */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
/* Parse the most recent GET_STATS FW response into adapter->drv_stats and
 * the per-RX-queue drop counters, dispatching on the chip family to pick
 * the matching response layout.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
581
/* ndo_get_stats64() handler: aggregates per-RX/TX-queue packet and byte
 * counters into @stats and derives the error counters from the
 * firmware-maintained driver stats (adapter->drv_stats).
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* u64_stats retry loop: re-read if the writer updated the
		 * counters concurrently, so pkts/bytes form a consistent pair.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666{
Sathya Perla3c8def92011-06-12 20:01:58 +0000667 struct be_tx_stats *stats = tx_stats(txo);
668
Sathya Perlaab1594e2011-07-25 19:10:15 +0000669 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000670 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671 stats->tx_bytes += skb->len;
672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500676/* Returns number of WRBs needed for the skb */
677static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500679 /* +1 for the header wrb */
680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681}
682
683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
684{
685 wrb->frag_pa_hi = upper_32_bits(addr);
686 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
687 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000688 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689}
690
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000691static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530692 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000693{
694 u8 vlan_prio;
695 u16 vlan_tag;
696
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100697 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000698 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
699 /* If vlan priority provided by OS is NOT in available bmap */
700 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
701 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
702 adapter->recommended_prio;
703
704 return vlan_tag;
705}
706
Sathya Perlac9c47142014-03-27 10:46:19 +0530707/* Used only for IP tunnel packets */
708static u16 skb_inner_ip_proto(struct sk_buff *skb)
709{
710 return (inner_ip_hdr(skb)->version == 4) ?
711 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
712}
713
714static u16 skb_ip_proto(struct sk_buff *skb)
715{
716 return (ip_hdr(skb)->version == 4) ?
717 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
718}
719
/* Fill the TX header WRB for @skb: CRC, LSO/checksum-offload, VLAN and
 * length fields. @wrb_cnt is the total WRB count for this request and
 * @len the total frame length; @skip_hw_vlan requests the f/w workaround
 * that suppresses HW VLAN insertion.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not set on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* For tunnelled pkts, checksum offload applies to the
		 * inner headers; also request IP csum (ipcs) in that case.
		 */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
763
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000764static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530765 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000766{
767 dma_addr_t dma;
768
769 be_dws_le_to_cpu(wrb, sizeof(*wrb));
770
771 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000772 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000773 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774 dma_unmap_single(dev, dma, wrb->frag_len,
775 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000776 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000777 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000778 }
779}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780
/* Returns the number of WRBs used up by the skb, or 0 on DMA-mapping
 * failure (in which case the TX queue is restored to its prior state and
 * all mappings made so far are undone).
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;	/* remembered for rollback on error */

	/* First WRB is the header; filled in place, then converted to LE */
	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	/* Map the linear (head) portion, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Record bookkeeping for the completion path */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* Walk the already-filled WRBs and unmap them; map_single is only
	 * true for the first (head-data) WRB.
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}
856
/* Non-zero when the f/w has signalled (via an async event) that the
 * QnQ async-event flag is set for this adapter.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
861
/* Insert VLAN tag(s) into the packet data in software (instead of HW
 * tagging): the tag carried in the skb (or the pvid, when the QnQ async
 * event was received), then the outer qnq_vid if configured. Sets
 * *skip_hw_vlan when the f/w must be told to skip HW VLAN insertion.
 * Returns the (possibly reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* May copy the skb if it is shared; original is consumed on failure */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag now lives in the packet data; clear the skb meta tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
906
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000907static bool be_ipv6_exthdr_check(struct sk_buff *skb)
908{
909 struct ethhdr *eh = (struct ethhdr *)skb->data;
910 u16 offset = ETH_HLEN;
911
912 if (eh->h_proto == htons(ETH_P_IPV6)) {
913 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
914
915 offset += sizeof(struct ipv6hdr);
916 if (ip6h->nexthdr != NEXTHDR_TCP &&
917 ip6h->nexthdr != NEXTHDR_UDP) {
918 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530919 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000920
921 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
922 if (ehdr->hdrlen == 0xff)
923 return true;
924 }
925 }
926 return false;
927}
928
929static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
930{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100931 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000932}
933
Sathya Perla748b5392014-05-09 13:29:13 +0530934static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000935{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000936 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000937}
938
/* Apply BEx/Lancer TX hardware workarounds to @skb: trim padded pkts,
 * skip or perform SW VLAN insertion, and drop pkts that could lock up
 * the ASIC. Returns the (possibly reallocated) skb, or NULL if the pkt
 * was dropped/freed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	/* Trim the pad off short IPv4 pkts so HW sees the real length */
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1006
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301007static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1008 struct sk_buff *skb,
1009 bool *skip_hw_vlan)
1010{
1011 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1012 * less may cause a transmit stall on that port. So the work-around is
1013 * to pad short packets (<= 32 bytes) to a 36-byte length.
1014 */
1015 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001016 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301017 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301018 }
1019
1020 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1021 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1022 if (!skb)
1023 return NULL;
1024 }
1025
1026 return skb;
1027}
1028
/* Notify the HW of all pending WRBs on @txo. Ensures the last request's
 * header WRB has the event (and completion) bits set, and pads the batch
 * with a dummy WRB when a non-Lancer chip is given an odd WRB count.
 * Note: hdr->dw[] is in little-endian at this point, hence the
 * cpu_to_le32() wrapping on every mask/set.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* Re-encode num_wrb in the last header to include the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1052
/* ndo_start_xmit handler: apply TX workarounds, enqueue the skb's WRBs on
 * the queue selected by the skb's queue mapping, stop the subqueue when it
 * is close to full, and ring the doorbell unless xmit_more indicates that
 * further packets will follow. Always returns NETDEV_TX_OK; dropped pkts
 * are counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	/* May reallocate, drop (returns NULL) or leave the skb unchanged */
	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; queue already rolled back */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* Stop the subqueue while a max-fragment request might not fit */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1089
1090static int be_change_mtu(struct net_device *netdev, int new_mtu)
1091{
1092 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301093 struct device *dev = &adapter->pdev->dev;
1094
1095 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1096 dev_info(dev, "MTU must be between %d and %d bytes\n",
1097 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098 return -EINVAL;
1099 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301100
1101 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301102 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001103 netdev->mtu = new_mtu;
1104 return 0;
1105}
1106
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001107static inline bool be_in_all_promisc(struct be_adapter *adapter)
1108{
1109 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1110 BE_IF_FLAGS_ALL_PROMISCUOUS;
1111}
1112
1113static int be_set_vlan_promisc(struct be_adapter *adapter)
1114{
1115 struct device *dev = &adapter->pdev->dev;
1116 int status;
1117
1118 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1119 return 0;
1120
1121 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1122 if (!status) {
1123 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1124 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1125 } else {
1126 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1127 }
1128 return status;
1129}
1130
1131static int be_clear_vlan_promisc(struct be_adapter *adapter)
1132{
1133 struct device *dev = &adapter->pdev->dev;
1134 int status;
1135
1136 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1137 if (!status) {
1138 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1139 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1140 }
1141 return status;
1142}
1143
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* Too many vids for HW filtering: fall back to vlan promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filtering succeeded: leave vlan promisc mode if active */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1178
Patrick McHardy80d5c362013-04-19 02:04:28 +00001179static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180{
1181 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001182 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001184 /* Packets with VID 0 are always received by Lancer by default */
1185 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301186 return status;
1187
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301188 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301189 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001190
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301191 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301192 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001193
Somnath Kotura6b74e02014-01-21 15:50:55 +05301194 status = be_vid_config(adapter);
1195 if (status) {
1196 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301197 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301198 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301199
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001200 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201}
1202
Patrick McHardy80d5c362013-04-19 02:04:28 +00001203static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001204{
1205 struct be_adapter *adapter = netdev_priv(netdev);
1206
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001207 /* Packets with VID 0 are always received by Lancer by default */
1208 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301209 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001210
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301211 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301212 adapter->vlans_added--;
1213
1214 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001215}
1216
/* Turn off all-promiscuous RX filtering. The cached if_flags bits are
 * cleared unconditionally (the f/w command status is not checked here).
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1222
/* Turn on all-promiscuous RX filtering. The cached if_flags bits are set
 * unconditionally (the f/w command status is not checked here).
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1228
1229static void be_set_mc_promisc(struct be_adapter *adapter)
1230{
1231 int status;
1232
1233 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1234 return;
1235
1236 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1237 if (!status)
1238 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1239}
1240
1241static void be_set_mc_list(struct be_adapter *adapter)
1242{
1243 int status;
1244
1245 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1246 if (!status)
1247 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1248 else
1249 be_set_mc_promisc(adapter);
1250}
1251
/* Reprogram the unicast MAC list: delete all previously added UC MACs
 * (slots 1..uc_macs; slot 0 is the primary MAC), then re-add the current
 * netdev UC list. Falls back to all-promiscuous mode when the list is
 * larger than the HW supports.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* NOTE: the loop decrements adapter->uc_macs down to 0 while
	 * walking slots upward from 1.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1272
1273static void be_clear_uc_list(struct be_adapter *adapter)
1274{
1275 int i;
1276
1277 for (i = 1; i < (adapter->uc_macs + 1); i++)
1278 be_cmd_pmac_del(adapter, adapter->if_handle,
1279 adapter->pmac_id[i], 0);
1280 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301281}
1282
/* ndo_set_rx_mode handler: program the RX filters to match the netdev's
 * current flags and address lists. Order matters: full promiscuous
 * overrides everything, then multicast-promiscuous, then the exact
 * UC/MC filter lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Promisc mode bypassed VLAN filtering; restore it */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program the UC list only when its size has changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1311
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On BEx chips this is a pmac delete+add pair; on newer chips a single
 * set-mac FW command. Returns 0, -EPERM when SR-IOV is off, -EINVAL on
 * a bad MAC/VF index, or a translated FW error.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace the old pmac entry with the new MAC.
		 * Note the old entry's delete status is not checked.
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC only after the FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1351
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001352static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301353 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001354{
1355 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001356 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001357
Sathya Perla11ac75e2011-12-13 00:58:50 +00001358 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001359 return -EPERM;
1360
Sathya Perla11ac75e2011-12-13 00:58:50 +00001361 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001362 return -EINVAL;
1363
1364 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001365 vi->max_tx_rate = vf_cfg->tx_rate;
1366 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001367 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1368 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001369 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301370 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001371
1372 return 0;
1373}
1374
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF @vf
 * via the hardware switch config. A vlan/qos of 0/0 resets transparent
 * tagging. Returns 0, -EPERM/-EINVAL on bad input, or a translated FW
 * error.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Pack qos into the PCP bits of the tag; skip the FW call
		 * if this exact tag is already programmed.
		 */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* Cache the tag only after the FW accepted it */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1409
/* ndo_set_vf_rate handler: program a TX rate limit for VF @vf.
 * min_tx_rate is not supported and must be 0. max_tx_rate == 0 disables
 * the limit (programmed directly without the link checks); otherwise it
 * is validated against the current link speed before being sent to FW.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 means "no limit"; skip the link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	/* The limit must lie between 100 Mbps and the link speed */
	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	/* May log a second message after a specific one above; keeps the
	 * final rate/VF context in the log either way.
	 */
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301471
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301472static int be_set_vf_link_state(struct net_device *netdev, int vf,
1473 int link_state)
1474{
1475 struct be_adapter *adapter = netdev_priv(netdev);
1476 int status;
1477
1478 if (!sriov_enabled(adapter))
1479 return -EPERM;
1480
1481 if (vf >= adapter->num_vfs)
1482 return -EINVAL;
1483
1484 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301485 if (status) {
1486 dev_err(&adapter->pdev->dev,
1487 "Link state change on VF %d failed: %#x\n", vf, status);
1488 return be_cmd_status(status);
1489 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301490
Kalesh APabccf232014-07-17 16:20:24 +05301491 adapter->vf_cfg[vf].plink_tracking = link_state;
1492
1493 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301494}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001495
Sathya Perla2632baf2013-10-01 16:00:00 +05301496static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1497 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001498{
Sathya Perla2632baf2013-10-01 16:00:00 +05301499 aic->rx_pkts_prev = rx_pkts;
1500 aic->tx_reqs_prev = tx_pkts;
1501 aic->jiffies = now;
1502}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001503
Sathya Perla2632baf2013-10-01 16:00:00 +05301504static void be_eqd_update(struct be_adapter *adapter)
1505{
1506 struct be_set_eqd set_eqd[MAX_EVT_QS];
1507 int eqd, i, num = 0, start;
1508 struct be_aic_obj *aic;
1509 struct be_eq_obj *eqo;
1510 struct be_rx_obj *rxo;
1511 struct be_tx_obj *txo;
1512 u64 rx_pkts, tx_pkts;
1513 ulong now;
1514 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001515
Sathya Perla2632baf2013-10-01 16:00:00 +05301516 for_all_evt_queues(adapter, eqo, i) {
1517 aic = &adapter->aic_obj[eqo->idx];
1518 if (!aic->enable) {
1519 if (aic->jiffies)
1520 aic->jiffies = 0;
1521 eqd = aic->et_eqd;
1522 goto modify_eqd;
1523 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524
Sathya Perla2632baf2013-10-01 16:00:00 +05301525 rxo = &adapter->rx_obj[eqo->idx];
1526 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001527 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301528 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001529 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001530
Sathya Perla2632baf2013-10-01 16:00:00 +05301531 txo = &adapter->tx_obj[eqo->idx];
1532 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001533 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301534 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001535 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001536
Sathya Perla2632baf2013-10-01 16:00:00 +05301537 /* Skip, if wrapped around or first calculation */
1538 now = jiffies;
1539 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1540 rx_pkts < aic->rx_pkts_prev ||
1541 tx_pkts < aic->tx_reqs_prev) {
1542 be_aic_update(aic, rx_pkts, tx_pkts, now);
1543 continue;
1544 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001545
Sathya Perla2632baf2013-10-01 16:00:00 +05301546 delta = jiffies_to_msecs(now - aic->jiffies);
1547 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1548 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1549 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001550
Sathya Perla2632baf2013-10-01 16:00:00 +05301551 if (eqd < 8)
1552 eqd = 0;
1553 eqd = min_t(u32, eqd, aic->max_eqd);
1554 eqd = max_t(u32, eqd, aic->min_eqd);
1555
1556 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001557modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301558 if (eqd != aic->prev_eqd) {
1559 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1560 set_eqd[num].eq_id = eqo->q.id;
1561 aic->prev_eqd = eqd;
1562 num++;
1563 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001564 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301565
1566 if (num)
1567 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001568}
1569
Sathya Perla3abcded2010-10-03 22:12:27 -07001570static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301571 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001572{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001573 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001574
Sathya Perlaab1594e2011-07-25 19:10:15 +00001575 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001576 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001577 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001578 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001579 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001580 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001581 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001582 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001583 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584}
1585
Sathya Perla2e588f82011-03-11 02:49:26 +00001586static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001587{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001588 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301589 * Also ignore ipcksm for ipv6 pkts
1590 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001591 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301592 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001593}
1594
/* Pop the page-info entry at the RX queue's tail and make its data
 * CPU-visible: a full dma_unmap_page() when this fragment is the last
 * user of the (big) page, otherwise just a partial sync of one
 * rx_frag_size chunk. Advances the queue tail and drops the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment carved from this page: tear down the
		 * whole mapping.
		 */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared with later frags: sync just this one */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1620
1621/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001622static void be_rx_compl_discard(struct be_rx_obj *rxo,
1623 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001624{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001625 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001626 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001628 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301629 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001630 put_page(page_info->page);
1631 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001632 }
1633}
1634
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp. Tiny frames are fully copied into the skb head;
 * larger frames copy only the ethernet header and attach the rest as
 * page fragments, coalescing consecutive frags that share a page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the L2 header into the linear area; the rest
		 * of the first fragment stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: the page ref is
			 * already held by frag[j], drop the extra one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1709
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the completion's fragments, set the
 * checksum/hash/vlan metadata, and hand it to the stack. On skb
 * allocation failure the completion's buffers are discarded and a drop
 * counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when RXCSUM is on and the verdict
	 * is usable for this packet type (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1745
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the completion's page fragments directly to the napi GRO skb
 * (coalescing frags that share a page), set the metadata, and feed the
 * GRO engine. Falls back to discarding the buffers if no skb is
 * available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j tracks the current skb frag slot; starts at -1 so the first
	 * iteration always opens slot 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Page already referenced by frag[j]; drop the
			 * duplicate reference.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1803
/* Decode a v1-format RX completion descriptor into the driver's
 * chip-independent rxcp representation. VLAN fields are extracted only
 * when the vlanf (vlan-tagged) bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	/* v1 completions report tunneled (e.g. encapsulated) packets */
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001826
/* Decode a v0-format RX completion descriptor into the driver's
 * chip-independent rxcp representation. Unlike v1, v0 carries an
 * ip_frag bit instead of tunneling info.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1848
/* Fetch and parse the next RX completion from the CQ tail, or return
 * NULL if no valid completion is pending. Also applies chip- and
 * mode-specific VLAN fixups before handing the rxcp to the caller, and
 * invalidates the descriptor so it is not re-processed.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the compl */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum verdict is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the vid was
		 * explicitly configured on the interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1893
Eric Dumazet1829b082011-03-01 05:48:12 +00001894static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001895{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001896 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001897
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001899 gfp |= __GFP_COMP;
1900 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001901}
1902
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.  Posts at most @frags_needed fragments (fewer on
 * allocation/mapping failure or when the RXQ runs out of free slots) and
 * notifies HW of the newly posted descriptors in chunks.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Loop stops early if the next slot is still owned (page != NULL),
	 * i.e. the RXQ is full
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page"; it is carved into
			 * rx_frag_size fragments below
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next fragment of the current big page; each frag
			 * holds its own page reference
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_frag marks the frag whose unmap-addr covers
			 * the whole big page for dma_unmap_page() later
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 descriptors */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1985
/* Fetch the next valid TX completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped in place, its valid bit is cleared so it is
 * never re-processed, and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the completion only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the DMA'd entry so it is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
2001
/* Unmap and free the wrbs (and their skbs) of one or more completed TX
 * requests, walking the TXQ from its tail up to and including wrb index
 * @last_index.  A non-NULL sent_skbs[] slot marks the header wrb of a
 * request.  Returns the number of wrbs consumed; the caller adjusts
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag wrb after a hdr wrb maps the skb linear
		 * (headlen) area; unmap it only once per request
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2035
/* Return the number of events in the event queue.
 * Each pending entry is consumed: its evt word is zeroed and the EQ tail
 * advanced, so a subsequent call starts after the entries counted here.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the evt-word read above against the clear below */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2055
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002056/* Leaves the EQ is disarmed state */
2057static void be_eq_clean(struct be_eq_obj *eqo)
2058{
2059 int num = events_get(eqo);
2060
2061 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2062}
2063
/* Drain an RX CQ during queue teardown: discard all pending completions,
 * wait for the HW flush completion (BEx only), leave the CQ disarmed and
 * release all RX buffers still posted to the RXQ.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2113
/* Drain all TX queues during teardown: reap completions until HW has been
 * silent for ~10ms, then forcibly free any wrbs that were enqueued but
 * never notified to HW, resetting the TXQ indices back to the last
 * HW-notified position.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW made progress; restart the silence timer */
				timeo = 0;
			}
			/* Only un-notified (pending) wrbs remain on this txq */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2178
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002179static void be_evt_queues_destroy(struct be_adapter *adapter)
2180{
2181 struct be_eq_obj *eqo;
2182 int i;
2183
2184 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002185 if (eqo->q.created) {
2186 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002187 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302188 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302189 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002190 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002191 be_queue_free(adapter, &eqo->q);
2192 }
2193}
2194
2195static int be_evt_queues_create(struct be_adapter *adapter)
2196{
2197 struct be_queue_info *eq;
2198 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302199 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002200 int i, rc;
2201
Sathya Perla92bf14a2013-08-27 16:57:32 +05302202 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2203 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002204
2205 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302206 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2207 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302208 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302209 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002210 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002211 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302212 aic->max_eqd = BE_MAX_EQD;
2213 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002214
2215 eq = &eqo->q;
2216 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302217 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002218 if (rc)
2219 return rc;
2220
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302221 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002222 if (rc)
2223 return rc;
2224 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002225 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002226}
2227
Sathya Perla5fb379e2009-06-18 00:02:59 +00002228static void be_mcc_queues_destroy(struct be_adapter *adapter)
2229{
2230 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002231
Sathya Perla8788fdc2009-07-27 22:52:03 +00002232 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002233 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002234 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002235 be_queue_free(adapter, q);
2236
Sathya Perla8788fdc2009-07-27 22:52:03 +00002237 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002238 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002239 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002240 be_queue_free(adapter, q);
2241}
2242
2243/* Must be called only after TX qs are created as MCC shares TX EQ */
2244static int be_mcc_queues_create(struct be_adapter *adapter)
2245{
2246 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002247
Sathya Perla8788fdc2009-07-27 22:52:03 +00002248 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002249 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302250 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002251 goto err;
2252
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002253 /* Use the default EQ for MCC completions */
2254 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002255 goto mcc_cq_free;
2256
Sathya Perla8788fdc2009-07-27 22:52:03 +00002257 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002258 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2259 goto mcc_cq_destroy;
2260
Sathya Perla8788fdc2009-07-27 22:52:03 +00002261 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002262 goto mcc_q_free;
2263
2264 return 0;
2265
2266mcc_q_free:
2267 be_queue_free(adapter, q);
2268mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002269 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002270mcc_cq_free:
2271 be_queue_free(adapter, cq);
2272err:
2273 return -1;
2274}
2275
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276static void be_tx_queues_destroy(struct be_adapter *adapter)
2277{
2278 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002279 struct be_tx_obj *txo;
2280 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281
Sathya Perla3c8def92011-06-12 20:01:58 +00002282 for_all_tx_queues(adapter, txo, i) {
2283 q = &txo->q;
2284 if (q->created)
2285 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2286 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287
Sathya Perla3c8def92011-06-12 20:01:58 +00002288 q = &txo->cq;
2289 if (q->created)
2290 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2291 be_queue_free(adapter, q);
2292 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293}
2294
Sathya Perla77071332013-08-27 16:57:34 +05302295static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002296{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002297 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002298 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302299 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002300
Sathya Perla92bf14a2013-08-27 16:57:32 +05302301 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002302
Sathya Perla3c8def92011-06-12 20:01:58 +00002303 for_all_tx_queues(adapter, txo, i) {
2304 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002305 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2306 sizeof(struct be_eth_tx_compl));
2307 if (status)
2308 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002309
John Stultz827da442013-10-07 15:51:58 -07002310 u64_stats_init(&txo->stats.sync);
2311 u64_stats_init(&txo->stats.sync_compl);
2312
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002313 /* If num_evt_qs is less than num_tx_qs, then more than
2314 * one txq share an eq
2315 */
2316 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2317 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2318 if (status)
2319 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002320
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002321 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2322 sizeof(struct be_eth_wrb));
2323 if (status)
2324 return status;
2325
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002326 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002327 if (status)
2328 return status;
2329 }
2330
Sathya Perlad3791422012-09-28 04:39:44 +00002331 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2332 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002333 return 0;
2334}
2335
2336static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002337{
2338 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002339 struct be_rx_obj *rxo;
2340 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002341
Sathya Perla3abcded2010-10-03 22:12:27 -07002342 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002343 q = &rxo->cq;
2344 if (q->created)
2345 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2346 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002347 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002348}
2349
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002350static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002351{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002352 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002353 struct be_rx_obj *rxo;
2354 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002355
Sathya Perla92bf14a2013-08-27 16:57:32 +05302356 /* We can create as many RSS rings as there are EQs. */
2357 adapter->num_rx_qs = adapter->num_evt_qs;
2358
2359 /* We'll use RSS only if atleast 2 RSS rings are supported.
2360 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002361 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302362 if (adapter->num_rx_qs > 1)
2363 adapter->num_rx_qs++;
2364
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002365 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002366 for_all_rx_queues(adapter, rxo, i) {
2367 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002368 cq = &rxo->cq;
2369 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302370 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002371 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002373
John Stultz827da442013-10-07 15:51:58 -07002374 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002375 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2376 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002377 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002378 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002379 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380
Sathya Perlad3791422012-09-28 04:39:44 +00002381 dev_info(&adapter->pdev->dev,
2382 "created %d RSS queue(s) and 1 default RX queue\n",
2383 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002384 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002385}
2386
/* INTx interrupt handler: count pending events, schedule NAPI and track
 * spurious interrupts so the kernel does not disable the irq line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events; EQ stays disarmed until NAPI re-arms it */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2418
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002419static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002420{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002421 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002422
Sathya Perla0b545a62012-11-23 00:27:18 +00002423 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2424 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002425 return IRQ_HANDLED;
2426}
2427
Sathya Perla2e588f82011-03-11 02:49:26 +00002428static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002429{
Somnath Koture38b1702013-05-29 22:55:56 +00002430 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002431}
2432
/* NAPI RX processing: consume up to @budget completions on @rxo, hand the
 * frames up the stack (via GRO when applicable) and replenish the RX queue.
 * @polling distinguishes busy-poll from regular NAPI context.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Track frags consumed so the refill below can match them */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2492
Kalesh AP512bb8a2014-09-02 09:56:49 +05302493static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2494{
2495 switch (status) {
2496 case BE_TX_COMP_HDR_PARSE_ERR:
2497 tx_stats(txo)->tx_hdr_parse_err++;
2498 break;
2499 case BE_TX_COMP_NDMA_ERR:
2500 tx_stats(txo)->tx_dma_err++;
2501 break;
2502 case BE_TX_COMP_ACL_ERR:
2503 tx_stats(txo)->tx_spoof_check_err++;
2504 break;
2505 }
2506}
2507
2508static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2509{
2510 switch (status) {
2511 case LANCER_TX_COMP_LSO_ERR:
2512 tx_stats(txo)->tx_tso_err++;
2513 break;
2514 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2515 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2516 tx_stats(txo)->tx_spoof_check_err++;
2517 break;
2518 case LANCER_TX_COMP_QINQ_ERR:
2519 tx_stats(txo)->tx_qinq_err++;
2520 break;
2521 case LANCER_TX_COMP_PARITY_ERR:
2522 tx_stats(txo)->tx_internal_parity_err++;
2523 break;
2524 case LANCER_TX_COMP_DMA_ERR:
2525 tx_stats(txo)->tx_dma_err++;
2526 break;
2527 }
2528}
2529
/* Reap all pending completions on a TX completion queue.
 * For each completion: free the wrbs/skbs it covers, count it, and
 * record any error status. Afterwards, re-arm the CQ, release queue
 * entries and wake the netdev subqueue if it had been flow-stopped.
 * @idx: netdev subqueue index corresponding to this TX object.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			/* Error accounting differs per ASIC family */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		/* Ack the processed completions and re-arm the CQ */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002568
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll vs NAPI arbitration helpers.
 * Each EQ object carries a spinlock-protected state word so that NAPI
 * polling and socket busy-polling never process the same RX queues
 * concurrently. The *_YIELD bits record that the other party tried to
 * enter while the EQ was held (used for stats/diagnostics elsewhere,
 * presumably — not read in this file's visible code).
 */

/* Try to take the EQ for NAPI processing.
 * Returns false (and marks NAPI_YIELD) if busy-poll currently owns it.
 * Caller runs in softirq context, hence plain spin_lock().
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing; resets state to IDLE. */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take the EQ for socket busy-polling (process context, so BH
 * must be disabled explicitly via spin_lock_bh()).
 * Returns false (and marks POLL_YIELD) if NAPI currently owns it.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-polling; resets state to IDLE. */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* (Re)initialize the EQ's busy-poll lock/state; called on be_open(). */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Quiesce busy-polling on this EQ; called on be_close().
 * Spins (1 ms steps) until the NAPI lock is acquired, which guarantees
 * no be_busy_poll() is running; the lock is intentionally kept held.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Stubs when busy-poll support is compiled out: NAPI always gets the
 * EQ, busy-poll never does, and enable/disable are no-ops.
 */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2668
/* NAPI poll handler for one event queue.
 * Drains TX completions for all TXQs on this EQ, then (if the NAPI/
 * busy-poll lock is available) processes RX up to @budget. If the lock
 * is held by busy-poll, we report the full budget so NAPI re-polls.
 * Also services the MCC queue when this is the MCC EQ. Events counted
 * at entry are acked at exit; re-arm only happens when work < budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RXQs; claim full budget to stay polling */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ (rearm=true) and ack the counted events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2708
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Socket busy-poll (low-latency) handler for one event queue.
 * Bails out with LL_FLUSH_BUSY if NAPI currently owns the EQ.
 * Otherwise polls each RXQ on this EQ with a small budget (4) and
 * returns the number of packets processed by the first RXQ that had
 * any work.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2730
/* Check the adapter for fatal hardware/firmware errors and log them.
 * Lancer chips report errors via SLIPORT registers (BAR-mapped);
 * BE2/BE3/Skyhawk report Unrecoverable Errors (UE) via PCI config
 * space, filtered through per-bit masks. On any real error the carrier
 * is turned off. Returns early if an error was already latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already recorded earlier; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-off bits are expected/spurious; ignore them */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			/* Skyhawk is trusted to report only real UEs */
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2806
Sathya Perla8d56ff12009-11-22 22:02:26 +00002807static void be_msix_disable(struct be_adapter *adapter)
2808{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002809 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002810 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002811 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302812 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002813 }
2814}
2815
/* Request MSI-X vectors for the adapter.
 * Asks for enough vectors to cover the NIC queues (and, when RoCE is
 * supported, an equal share for RoCE), accepting anything down to
 * MIN_MSIX_VECTORS. On success, splits the granted vectors between
 * RoCE and NIC and records the counts. On failure, PFs fall back to
 * INTx (return 0); VFs have no INTx, so the error is propagated to
 * fail the probe. Returns 0 or a negative pci_enable_msix_range()
 * error code.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, but not < MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors, when there are spares */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2859
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002860static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302861 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002862{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302863 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002864}
2865
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On any failure, frees the IRQs already requested (walking the EQ
 * array backwards from the last successful one), disables MSI-X and
 * returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: release the IRQs acquired before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2889
/* Register the adapter's interrupt handler(s).
 * Tries MSI-X first when enabled; PFs fall back to a shared INTx line
 * (attached to EQ0) if MSI-X registration fails, while VFs return the
 * error since they do not support INTx. Sets isr_registered on
 * success. Returns 0 or the request_irq()/be_msix_register() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2917
/* Release the adapter's IRQ(s): the shared INTx line (attached to EQ0)
 * in INTx mode, or one IRQ per event queue in MSI-X mode. No-op if no
 * handler was registered. Clears isr_registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2940
/* Tear down all RX queues: issue the FW destroy command and drain the
 * RX completion queue for each created RXQ, then free the queue memory
 * (freed unconditionally, since be_queue_alloc may have succeeded even
 * when the FW create did not).
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2956
/* ndo_stop handler: quiesce and tear down the datapath.
 * Ordering matters: disable NAPI/busy-poll, stop async MCC, stop the
 * TX queues and drain pending TX completions, destroy RX queues, then
 * synchronize and clean each EQ before releasing IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no handler is still running for this EQ's vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3002
/* Allocate and create all RX queues and configure RSS.
 * The default (non-RSS) RXQ must be created first per FW requirement.
 * With multiple RXQs, the RSS indirection table is filled round-robin
 * with the RSS queue ids and a fresh random hash key is programmed;
 * with a single RXQ, RSS is disabled. Finally the RX rings are filled
 * with buffers for the first time. Returns 0 or a FW-command error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the RSS queues across the indirection table,
		 * wrapping around until every slot is filled.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key actually programmed (reported via ethtool) */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3068
/* ndo_open handler: bring up the datapath.
 * Creates the RX queues, registers IRQs, arms all RX/TX completion
 * queues and event queues, enables NAPI/busy-poll and async MCC, then
 * reports link state and starts the TX queues. On any setup failure,
 * be_close() unwinds whatever was brought up and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn already-added VxLAN ports for RX offload (Skyhawk only) */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3118
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003119static int be_setup_wol(struct be_adapter *adapter, bool enable)
3120{
3121 struct be_dma_mem cmd;
3122 int status = 0;
3123 u8 mac[ETH_ALEN];
3124
3125 memset(mac, 0, ETH_ALEN);
3126
3127 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07003128 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3129 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303130 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303131 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003132
3133 if (enable) {
3134 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303135 PCICFG_PM_CONTROL_OFFSET,
3136 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003137 if (status) {
3138 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003139 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003140 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3141 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003142 return status;
3143 }
3144 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303145 adapter->netdev->dev_addr,
3146 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003147 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3148 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3149 } else {
3150 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3151 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3152 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3153 }
3154
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003155 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003156 return status;
3157}
3158
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003159static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3160{
3161 u32 addr;
3162
3163 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3164
3165 mac[5] = (u8)(addr & 0xFF);
3166 mac[4] = (u8)((addr >> 8) & 0xFF);
3167 mac[3] = (u8)((addr >> 16) & 0xFF);
3168 /* Use the OUI from the current MAC address */
3169 memcpy(mac, adapter->netdev->dev_addr, 3);
3170}
3171
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses pmac-add; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* On failure, log and keep going with the remaining VFs;
		 * the last error (if any) is returned to the caller.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
3207
/* Query the FW for the currently-active MAC of every VF and cache it
 * in the VF's config. Stops and returns the error on the first failed
 * query; returns 0 when all VFs were read successfully.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3224
/* Tear down SR-IOV: disable SR-IOV on the PCI device, remove each VF's
 * programmed MAC and destroy its interface, then free the VF config
 * array. If any VF is still assigned to a VM, SR-IOV and the per-VF
 * FW objects are left intact (only the local bookkeeping is freed).
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo the MAC programming done in be_vf_eth_addr_config() */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3253
/* Destroy all queues in the reverse order of their creation in
 * be_setup_queues(): MCC, RX CQs, TX queues, then event queues last.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3261
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303262static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003263{
Sathya Perla191eb752012-02-23 18:50:13 +00003264 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3265 cancel_delayed_work_sync(&adapter->work);
3266 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3267 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303268}
3269
Somnath Koturb05004a2013-12-05 12:08:16 +05303270static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303271{
Somnath Koturb05004a2013-12-05 12:08:16 +05303272 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003273 be_cmd_pmac_del(adapter, adapter->if_handle,
3274 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303275 kfree(adapter->pmac_id);
3276 adapter->pmac_id = NULL;
3277 }
3278}
3279
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: convert the tunnel interface back to
 * normal mode, clear the VxLAN UDP port in FW, reset the driver's
 * VxLAN state and strip the tunnel-offload features from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Advertised encap features must go away along with HW offload */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303300
/* Full teardown of the function: stop the worker, clear VFs, undo
 * VxLAN offloads, remove MACs, destroy the interface and all queues,
 * and release MSI-X vectors. Inverse of be_setup().
 */
static int be_clear(struct be_adapter *adapter)
{
	/* Stop the periodic worker before pulling resources out from
	 * under it.
	 */
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3329
Kalesh AP0700d812015-01-20 03:51:43 -05003330static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3331 u32 cap_flags, u32 vf)
3332{
3333 u32 en_flags;
3334 int status;
3335
3336 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3337 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3338 BE_IF_FLAGS_RSS;
3339
3340 en_flags &= cap_flags;
3341
3342 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3343 if_handle, vf);
3344
3345 return status;
3346}
3347
Sathya Perla4c876612013-02-03 20:30:11 +00003348static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003349{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303350 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003351 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003352 u32 cap_flags, vf;
3353 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003354
Kalesh AP0700d812015-01-20 03:51:43 -05003355 /* If a FW profile exists, then cap_flags are updated */
Sathya Perla4c876612013-02-03 20:30:11 +00003356 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3357 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003358
Sathya Perla4c876612013-02-03 20:30:11 +00003359 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303360 if (!BE3_chip(adapter)) {
3361 status = be_cmd_get_profile_config(adapter, &res,
3362 vf + 1);
3363 if (!status)
3364 cap_flags = res.if_cap_flags;
3365 }
Sathya Perla4c876612013-02-03 20:30:11 +00003366
Kalesh AP0700d812015-01-20 03:51:43 -05003367 status = be_if_create(adapter, &vf_cfg->if_handle,
3368 cap_flags, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003369 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003370 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003371 }
Kalesh AP0700d812015-01-20 03:51:43 -05003372
3373 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003374}
3375
Sathya Perla39f1d942012-05-08 19:41:24 +00003376static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003377{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003378 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003379 int vf;
3380
Sathya Perla39f1d942012-05-08 19:41:24 +00003381 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3382 GFP_KERNEL);
3383 if (!adapter->vf_cfg)
3384 return -ENOMEM;
3385
Sathya Perla11ac75e2011-12-13 00:58:50 +00003386 for_all_vfs(adapter, vf_cfg, vf) {
3387 vf_cfg->if_handle = -1;
3388 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003389 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003390 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003391}
3392
/* Bring up SR-IOV VFs. If VFs already exist (old_vfs != 0, e.g. after
 * a PF driver reload), their i/f handles and MACs are queried from FW;
 * otherwise new interfaces are created and MACs programmed. Each VF is
 * then granted filter-management privilege if missing, and for newly
 * created VFs QoS/link-state defaults are applied before SR-IOV is
 * enabled on the PCI device. On any failure all VF state is rolled
 * back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: recover their if-ids and MACs from FW */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh enable: create interfaces and assign MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3467
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303468/* Converting function_mode bits on BE3 to SH mc_type enums */
3469
3470static u8 be_convert_mc_type(u32 function_mode)
3471{
Suresh Reddy66064db2014-06-23 16:41:29 +05303472 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303473 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303474 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303475 return FLEX10;
3476 else if (function_mode & VNIC_MODE)
3477 return vNIC2;
3478 else if (function_mode & UMC_ENABLED)
3479 return UMC;
3480 else
3481 return MC_NONE;
3482}
3483
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PF gets the full unicast pmac pool, a VF only a small slice */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable, non-SRIOV PF; otherwise
	 * max_rss_qs stays as set by the caller (res was zero-inited there).
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3551
Sathya Perla30128032011-11-10 19:17:57 +00003552static void be_setup_init(struct be_adapter *adapter)
3553{
3554 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003555 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003556 adapter->if_handle = -1;
3557 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003558 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003559 if (be_physfn(adapter))
3560 adapter->cmd_privileges = MAX_PRIVILEGES;
3561 else
3562 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003563}
3564
/* Query the PF-pool resource limits from FW, stash them in pool_res,
 * and derive adapter->num_vfs from the num_vfs module parameter —
 * clamped to the HW maximum, or overridden by the count of VFs that
 * are already enabled. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	/* Fall back to the PCI config-space TotalVFs on such FW */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3609
/* Populate adapter->res with the function's resource limits: derived
 * heuristically for BE2/BE3 (BEx_get_resources), or queried from FW
 * for Lancer/Skyhawk. Logs the resulting maxima.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3646
Sathya Perlad3d18312014-08-01 17:47:30 +05303647static void be_sriov_config(struct be_adapter *adapter)
3648{
3649 struct device *dev = &adapter->pdev->dev;
3650 int status;
3651
3652 status = be_get_sriov_config(adapter);
3653 if (status) {
3654 dev_err(dev, "Failed to query SR-IOV configuration\n");
3655 dev_err(dev, "SR-IOV cannot be enabled\n");
3656 return;
3657 }
3658
3659 /* When the HW is in SRIOV capable configuration, the PF-pool
3660 * resources are equally distributed across the max-number of
3661 * VFs. The user may request only a subset of the max-vfs to be
3662 * enabled. Based on num_vfs, redistribute the resources across
3663 * num_vfs so that each VF will have access to more number of
3664 * resources. This facility is not available in BE3 FW.
3665 * Also, this is done by FW in Lancer chip.
3666 */
3667 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3668 status = be_cmd_set_sriov_config(adapter,
3669 adapter->pool_res,
3670 adapter->num_vfs);
3671 if (status)
3672 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3673 }
3674}
3675
/* Query the FW configuration for this function: active profile,
 * SR-IOV settings (PF on non-BE2 only), and resource limits. Also
 * allocates the pmac_id table sized for the max unicast MACs and
 * clamps the configured queue count to the discovered limits.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* SR-IOV config only applies to a PF; BE2 has no such FW support */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3709
Sathya Perla95046b92013-07-23 15:25:02 +05303710static int be_mac_setup(struct be_adapter *adapter)
3711{
3712 u8 mac[ETH_ALEN];
3713 int status;
3714
3715 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3716 status = be_cmd_get_perm_mac(adapter, mac);
3717 if (status)
3718 return status;
3719
3720 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3721 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3722 } else {
3723 /* Maybe the HW was reset; dev_addr must be re-programmed */
3724 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3725 }
3726
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003727 /* For BE3-R VFs, the PF programs the initial MAC address */
3728 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3729 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3730 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303731 return 0;
3732}
3733
/* Kick off the periodic (1 second) worker and record that it is
 * scheduled, so be_cancel_worker() knows there is work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3739
/* Create all queues in dependency order (event queues first, then TX,
 * RX CQs and MCC) and publish the real RX/TX queue counts to the net
 * stack. Callers must hold rtnl_lock for the real_num_*_queues
 * updates (see be_setup()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3774
/* Recreate all queues (e.g. after a channel/queue-count change):
 * quiesce the device, tear down queues (and MSI-X if it is not shared
 * with RoCE), then rebuild everything and resume.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3810
/* Parse the leading major number out of a FW version string such as
 * "4.2.456.0". Returns 0 when the string does not begin with an
 * integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
3821
/* Main bring-up path for the function: discover the FW config and
 * resource limits, enable MSI-X, create the interface and all queues,
 * program the MAC, restore VLAN/RX-mode/flow-control settings, set up
 * VFs if requested, and start the periodic worker. On any failure the
 * partially built state is torn down with be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn about known-problematic old BE2 firmware */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* If the requested flow-control setting is rejected, read back
	 * what the FW actually applied.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3901
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: notify every event queue and schedule its NAPI
 * context so completions get processed without relying on interrupts
 * (used e.g. by netconsole).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3915
/* 32-byte cookie marking a flash section directory in a UFI image,
 * split into two 16-byte rows compared as raw bytes via memcmp in
 * get_fsec_info(). Note: the second row exactly fills its 16 bytes,
 * so it carries no NUL terminator — this is intentional.
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003917
Sathya Perla306f1342011-08-02 19:57:45 +00003918static bool phy_flashing_required(struct be_adapter *adapter)
3919{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05003920 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003921 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003922}
3923
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003924static bool is_comp_in_ufi(struct be_adapter *adapter,
3925 struct flash_section_info *fsec, int type)
3926{
3927 int i = 0, img_type = 0;
3928 struct flash_section_info_g2 *fsec_g2 = NULL;
3929
Sathya Perlaca34fe32012-11-06 17:48:56 +00003930 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003931 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3932
3933 for (i = 0; i < MAX_FLASH_COMP; i++) {
3934 if (fsec_g2)
3935 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3936 else
3937 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3938
3939 if (img_type == type)
3940 return true;
3941 }
3942 return false;
3943
3944}
3945
Jingoo Han4188e7d2013-08-05 18:02:02 +09003946static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303947 int header_size,
3948 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003949{
3950 struct flash_section_info *fsec = NULL;
3951 const u8 *p = fw->data;
3952
3953 p += header_size;
3954 while (p < (fw->data + fw->size)) {
3955 fsec = (struct flash_section_info *)p;
3956 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3957 return fsec;
3958 p += 32;
3959 }
3960 return NULL;
3961}
3962
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303963static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3964 u32 img_offset, u32 img_size, int hdr_size,
3965 u16 img_optype, bool *crc_match)
3966{
3967 u32 crc_offset;
3968 int status;
3969 u8 crc[4];
3970
Vasundhara Volam70a7b522015-02-06 08:18:39 -05003971 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
3972 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303973 if (status)
3974 return status;
3975
3976 crc_offset = hdr_size + img_offset + img_size - 4;
3977
3978 /* Skip flashing, if crc of flashed region matches */
3979 if (!memcmp(crc, p + crc_offset, 4))
3980 *crc_match = true;
3981 else
3982 *crc_match = false;
3983
3984 return status;
3985}
3986
/* Write an image to flash in 32KB chunks through the DMA-able command
 * buffer. Intermediate chunks use the SAVE operation; only the final
 * chunk uses the FLASH operation that commits the write. PHY firmware
 * uses its own pair of opcodes, and an ILLEGAL_REQUEST status for PHY
 * FW is tolerated (treated as success).
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		/* At most 32KB per firmware command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks accumulate
		 * (SAVE).
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4027
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004028/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004029static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304030 const struct firmware *fw,
4031 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004032{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004033 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304034 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004035 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304036 int status, i, filehdr_size, num_comp;
4037 const struct flash_comp *pflashcomp;
4038 bool crc_match;
4039 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004040
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004041 struct flash_comp gen3_flash_types[] = {
4042 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4043 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4044 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4045 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4046 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4047 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4048 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4049 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4050 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4051 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4052 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4053 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4054 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4055 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4056 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4057 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4058 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4059 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4060 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4061 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004062 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004063
4064 struct flash_comp gen2_flash_types[] = {
4065 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4066 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4067 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4068 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4069 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4070 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4071 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4072 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4073 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4074 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4075 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4076 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4077 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4078 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4079 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4080 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004081 };
4082
Sathya Perlaca34fe32012-11-06 17:48:56 +00004083 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004084 pflashcomp = gen3_flash_types;
4085 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004086 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004087 } else {
4088 pflashcomp = gen2_flash_types;
4089 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004090 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004091 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004092 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004093
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004094 /* Get flash section info*/
4095 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4096 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304097 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004098 return -1;
4099 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004100 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004101 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004102 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004103
4104 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4105 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4106 continue;
4107
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004108 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4109 !phy_flashing_required(adapter))
4110 continue;
4111
4112 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304113 status = be_check_flash_crc(adapter, fw->data,
4114 pflashcomp[i].offset,
4115 pflashcomp[i].size,
4116 filehdr_size +
4117 img_hdrs_size,
4118 OPTYPE_REDBOOT, &crc_match);
4119 if (status) {
4120 dev_err(dev,
4121 "Could not get CRC for 0x%x region\n",
4122 pflashcomp[i].optype);
4123 continue;
4124 }
4125
4126 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004127 continue;
4128 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004129
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304130 p = fw->data + filehdr_size + pflashcomp[i].offset +
4131 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004132 if (p + pflashcomp[i].size > fw->data + fw->size)
4133 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004134
4135 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004136 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004137 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304138 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004139 pflashcomp[i].img_type);
4140 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004141 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004142 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004143 return 0;
4144}
4145
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304146static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4147{
4148 u32 img_type = le32_to_cpu(fsec_entry.type);
4149 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4150
4151 if (img_optype != 0xFFFF)
4152 return img_optype;
4153
4154 switch (img_type) {
4155 case IMAGE_FIRMWARE_iSCSI:
4156 img_optype = OPTYPE_ISCSI_ACTIVE;
4157 break;
4158 case IMAGE_BOOT_CODE:
4159 img_optype = OPTYPE_REDBOOT;
4160 break;
4161 case IMAGE_OPTION_ROM_ISCSI:
4162 img_optype = OPTYPE_BIOS;
4163 break;
4164 case IMAGE_OPTION_ROM_PXE:
4165 img_optype = OPTYPE_PXE_BIOS;
4166 break;
4167 case IMAGE_OPTION_ROM_FCoE:
4168 img_optype = OPTYPE_FCOE_BIOS;
4169 break;
4170 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4171 img_optype = OPTYPE_ISCSI_BACKUP;
4172 break;
4173 case IMAGE_NCSI:
4174 img_optype = OPTYPE_NCSI_FW;
4175 break;
4176 case IMAGE_FLASHISM_JUMPVECTOR:
4177 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4178 break;
4179 case IMAGE_FIRMWARE_PHY:
4180 img_optype = OPTYPE_SH_PHY_FW;
4181 break;
4182 case IMAGE_REDBOOT_DIR:
4183 img_optype = OPTYPE_REDBOOT_DIR;
4184 break;
4185 case IMAGE_REDBOOT_CONFIG:
4186 img_optype = OPTYPE_REDBOOT_CONFIG;
4187 break;
4188 case IMAGE_UFI_DIR:
4189 img_optype = OPTYPE_UFI_DIR;
4190 break;
4191 default:
4192 break;
4193 }
4194
4195 return img_optype;
4196}
4197
/* Program the flash of a Skyhawk-R adapter from a UFI image.
 *
 * Preferably uses the newer OFFSET-based flashing mechanism
 * (OPTYPE_OFFSET_SPECIFIED); if the FW currently on the card rejects
 * it (ILLEGAL_REQUEST/ILLEGAL_FIELD), the whole image loop is retried
 * from scratch with the older per-region OPTYPE mechanism via the
 * retry_flash label. CRC checks let unchanged regions be skipped,
 * except for old-format images (optype == 0xFFFF) which are always
 * flashed.
 * Returns 0 on success or a negative errno (-EAGAIN asks the admin to
 * reboot and re-flash to finish a partially completed download).
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* Entries whose op-type cannot be determined are skipped */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* On-flash region already matches the image: skip it */
		if (crc_match)
			continue;

flash:
		/* Reject sections that overrun the image file buffer */
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4308
/* Download a FW image to a Lancer adapter.
 *
 * Streams the image in 32KB chunks into the "/prg" object via
 * lancer_cmd_write_object() (advancing by the number of bytes the FW
 * actually accepted), then issues a zero-length write to commit it.
 * Depending on the FW-reported change_status, either resets the
 * adapter to activate the new image or tells the admin a reboot is
 * needed.
 * Returns 0 on success or a negative errno on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command transfers whole 32-bit words only */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the request header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually consumed, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			/* Reset failure is not fatal to the download itself;
			 * the new image activates on the next reboot
			 */
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4393
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004394#define BE2_UFI 2
4395#define BE3_UFI 3
4396#define BE3R_UFI 10
4397#define SH_UFI 4
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004398#define SH_P2_UFI 11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004399
Sathya Perlaca34fe32012-11-06 17:48:56 +00004400static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004401 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004402{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004403 if (!fhdr) {
4404 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4405 return -1;
4406 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004407
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004408 /* First letter of the build version is used to identify
4409 * which chip this image file is meant for.
4410 */
4411 switch (fhdr->build[0]) {
4412 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004413 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4414 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004415 case BLD_STR_UFI_TYPE_BE3:
4416 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4417 BE3_UFI;
4418 case BLD_STR_UFI_TYPE_BE2:
4419 return BE2_UFI;
4420 default:
4421 return -1;
4422 }
4423}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004424
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004425/* Check if the flash image file is compatible with the adapter that
4426 * is being flashed.
4427 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004428 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004429 */
4430static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4431 struct flash_file_hdr_g3 *fhdr)
4432{
4433 int ufi_type = be_get_ufi_type(adapter, fhdr);
4434
4435 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004436 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004437 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004438 case SH_UFI:
4439 return (skyhawk_chip(adapter) &&
4440 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004441 case BE3R_UFI:
4442 return BE3_chip(adapter);
4443 case BE3_UFI:
4444 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4445 case BE2_UFI:
4446 return BE2_chip(adapter);
4447 default:
4448 return false;
4449 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004450}
4451
/* Download a FW image to a BE2/BE3/Skyhawk adapter.
 * Validates image/adapter compatibility, allocates the flashrom DMA
 * command buffer, then dispatches each image header to the chip-family
 * specific flashing routine.
 * Returns 0 on success or a negative errno on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On BE3/Skyhawk only image-id 1 is flashed */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
4495
4496int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4497{
4498 const struct firmware *fw;
4499 int status;
4500
4501 if (!netif_running(adapter->netdev)) {
4502 dev_err(&adapter->pdev->dev,
4503 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304504 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004505 }
4506
4507 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4508 if (status)
4509 goto fw_exit;
4510
4511 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4512
4513 if (lancer_chip(adapter))
4514 status = lancer_fw_download(adapter, fw);
4515 else
4516 status = be_fw_download(adapter, fw);
4517
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004518 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304519 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004520
Ajit Khaparde84517482009-09-04 03:12:16 +00004521fw_exit:
4522 release_firmware(fw);
4523 return status;
4524}
4525
/* ndo_bridge_setlink handler: program the HW switch forwarding mode
 * (VEB or VEPA) from the IFLA_BRIDGE_MODE netlink attribute.
 * Only valid when SR-IOV is enabled. Returns 0 on success or a
 * negative errno.
 * NOTE(review): if the nested attributes contain no IFLA_BRIDGE_MODE,
 * the loop falls through to the err label with status == 0, logging a
 * failure message but returning success -- presumably intentional
 * best-effort behavior; confirm before changing.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Guard against a truncated attribute payload */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4572
4573static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304574 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004575{
4576 struct be_adapter *adapter = netdev_priv(dev);
4577 int status = 0;
4578 u8 hsw_mode;
4579
4580 if (!sriov_enabled(adapter))
4581 return 0;
4582
4583 /* BE and Lancer chips support VEB mode only */
4584 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4585 hsw_mode = PORT_FWD_TYPE_VEB;
4586 } else {
4587 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4588 adapter->if_handle, &hsw_mode);
4589 if (status)
4590 return 0;
4591 }
4592
4593 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4594 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004595 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4596 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004597}
4598
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304599#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004600/* VxLAN offload Notes:
4601 *
4602 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4603 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4604 * is expected to work across all types of IP tunnels once exported. Skyhawk
4605 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304606 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4607 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4608 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004609 *
4610 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4611 * adds more than one port, disable offloads and don't re-enable them again
4612 * until after all the tunnels are removed.
4613 */
/* vxlan_add handler: enable HW VxLAN offloads for the given UDP port.
 * Skyhawk supports offloads for exactly one port: the first added port
 * converts the interface to tunnel mode and exports the tunnel offload
 * feature bits; any additional port disables offloads entirely (they
 * stay off until the extra ports are removed -- see the notes above).
 * vxlan_port_count tracks every port the stack has added, including
 * ones we could not offload.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Only Skyhawk-class chips reach the offload setup below */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* A second port while offloads are on: turn offloads off */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already disabled by a previous extra port: just count */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities only now that a VxLAN port
	 * is actually configured
	 */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4662
4663static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4664 __be16 port)
4665{
4666 struct be_adapter *adapter = netdev_priv(netdev);
4667
4668 if (lancer_chip(adapter) || BEx_chip(adapter))
4669 return;
4670
4671 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004672 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304673
4674 be_disable_vxlan_offloads(adapter);
4675
4676 dev_info(&adapter->pdev->dev,
4677 "Disabled VxLAN offloads for UDP port %d\n",
4678 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004679done:
4680 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304681}
Joe Stringer725d5482014-11-13 16:38:13 -08004682
/* ndo_features_check callback: decide per-skb which offload features may
 * be used. Non-encapsulated traffic passes through unchanged; for
 * encapsulated traffic with VxLAN offloads enabled, checksum/GSO offloads
 * are stripped unless the packet is verifiably a VxLAN packet.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Neither IPv4 nor IPv6 outer header: leave features alone */
		return features;
	}

	/* A VxLAN packet must be UDP, carry an inner Ethernet frame
	 * (ETH_P_TEB), and have exactly a UDP + VxLAN header between the
	 * transport and inner MAC headers; otherwise drop csum/GSO offloads.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304723#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304724
/* Network-stack entry points for be2net devices; installed on the
 * netdev in be_netdev_init(). VxLAN callbacks are only compiled in
 * when CONFIG_BE2NET_VXLAN is set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
};
4755
/* Initialize netdev feature flags and attach the driver's ops/ethtool
 * tables. Called from be_probe() before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-enabled features: everything togglable plus VLAN
	 * RX acceleration/filtering, which stay on unconditionally.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW supports unicast MAC filtering */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so the frame (payload + Ethernet header) fits 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4782
4783static void be_unmap_pci_bars(struct be_adapter *adapter)
4784{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004785 if (adapter->csr)
4786 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004787 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004788 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004789}
4790
/* Return the PCI BAR index that holds the doorbell registers:
 * BAR 0 on Lancer chips and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4798
4799static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004800{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004801 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004802 adapter->roce_db.size = 4096;
4803 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4804 db_bar(adapter));
4805 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4806 db_bar(adapter));
4807 }
Parav Pandit045508a2012-03-26 14:27:13 +00004808 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004809}
4810
/* Map the PCI BARs needed by the driver: the CSR BAR (BAR 2, only on
 * BEx PFs), the doorbell BAR (index from db_bar()), and the RoCE
 * doorbell window on Skyhawk. Returns 0 on success or -ENOMEM, undoing
 * any partial mappings on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* CSR registers are only mapped for BEx physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	/* No-op on non-Skyhawk chips; always succeeds */
	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4834
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004835static void be_ctrl_cleanup(struct be_adapter *adapter)
4836{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004837 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004838
4839 be_unmap_pci_bars(adapter);
4840
4841 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004842 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4843 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004844
Sathya Perla5b8821b2011-08-02 19:57:44 +00004845 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004846 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004847 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4848 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004849}
4850
/* Initialize the control path used to talk to the firmware: read the
 * SLI interface register, map PCI BARs, allocate the 16-byte-aligned
 * mailbox and the RX-filter DMA buffers, and set up the locks used for
 * mailbox/MCC access. Returns 0 on success or a negative errno, with
 * all partial allocations rolled back via the goto-cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode SLI family and PF/VF role from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to a 16-byte boundary below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Zeroed DMA buffer reused for RX_FILTER firmware commands */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4909
4910static void be_stats_cleanup(struct be_adapter *adapter)
4911{
Sathya Perla3abcded2010-10-03 22:12:27 -07004912 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004913
4914 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004915 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4916 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004917}
4918
4919static int be_stats_init(struct be_adapter *adapter)
4920{
Sathya Perla3abcded2010-10-03 22:12:27 -07004921 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004922
Sathya Perlaca34fe32012-11-06 17:48:56 +00004923 if (lancer_chip(adapter))
4924 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4925 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004926 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004927 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004928 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004929 else
4930 /* ALL non-BE ASICs */
4931 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004932
Joe Perchesede23fa2013-08-26 22:45:23 -07004933 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4934 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304935 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304936 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004937 return 0;
4938}
4939
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The ordering matters: stop RoCE and interrupts first,
 * cancel the recovery worker, unregister the netdev, then release
 * firmware/DMA/PCI resources, and finally free the netdev itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Can be NULL if be_probe() failed before pci_set_drvdata() */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4970
Sathya Perla39f1d942012-05-08 19:41:24 +00004971static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004972{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304973 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004974
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004975 status = be_cmd_get_cntl_attributes(adapter);
4976 if (status)
4977 return status;
4978
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004979 /* Must be a power of 2 or else MODULO will BUG_ON */
4980 adapter->be_get_temp_freq = 64;
4981
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304982 if (BEx_chip(adapter)) {
4983 level = be_cmd_get_fw_log_level(adapter);
4984 adapter->msg_enable =
4985 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4986 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004987
Sathya Perla92bf14a2013-08-27 16:57:32 +05304988 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004989 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004990}
4991
/* Attempt to recover a Lancer adapter after a firmware error: wait for
 * the chip to report ready, quiesce the interface, clear all driver
 * state and error flags, then re-run setup and reopen the interface if
 * it was running. Returns 0 on success; -EAGAIN means resource
 * provisioning is still in progress and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
5028
/* Periodic (1s) worker that polls for hardware errors and, on Lancer,
 * drives adapter recovery. Reschedules itself unless a Lancer recovery
 * attempt failed with anything other than -EAGAIN.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl so the stack stops using the device
		 * while recovery runs.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
5054
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, otherwise issues stats/temperature queries,
 * replenishes starved RX queues, and updates EQ delay (interrupt
 * moderation). Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Avoid queueing a new stats command while one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Query die temperature every be_get_temp_freq ticks (PF only) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5097
Sathya Perla257a3fe2013-06-14 15:54:51 +05305098/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00005099static bool be_reset_required(struct be_adapter *adapter)
5100{
Sathya Perla257a3fe2013-06-14 15:54:51 +05305101 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00005102}
5103
Sathya Perlad3791422012-09-28 04:39:44 +00005104static char *mc_name(struct be_adapter *adapter)
5105{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305106 char *str = ""; /* default */
5107
5108 switch (adapter->mc_type) {
5109 case UMC:
5110 str = "UMC";
5111 break;
5112 case FLEX10:
5113 str = "FLEX10";
5114 break;
5115 case vNIC1:
5116 str = "vNIC-1";
5117 break;
5118 case nPAR:
5119 str = "nPAR";
5120 break;
5121 case UFP:
5122 str = "UFP";
5123 break;
5124 case vNIC2:
5125 str = "vNIC-2";
5126 break;
5127 default:
5128 str = "";
5129 }
5130
5131 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005132}
5133
/* Return "PF" for the physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5138
/* Map a PCI device ID to the marketing name of the NIC family, used
 * only in log messages. Unknown IDs fall back to BE_NAME.
 */
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
5158
/* PCI probe callback: bring up one be2net function. Enables the PCI
 * device, allocates the netdev, sets the DMA mask, initializes the
 * control path and firmware, allocates stats buffers, runs be_setup()
 * and registers the netdev. Errors unwind through the labelled cleanup
 * chain in strict reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; a failure here is not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled on it */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to on in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5281
/* PM suspend callback: arm wake-on-LAN if enabled, quiesce interrupts
 * and the recovery worker, close the interface (under rtnl) and clear
 * driver state, then power the PCI device down. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5306
/* PM resume callback: re-enable and restore the PCI device, wait for
 * firmware readiness, reset and re-initialize the function, then rerun
 * be_setup(), reopen the interface if it was running, restart the
 * recovery worker and disarm wake-on-LAN. Returns 0 or a negative
 * errno from one of the bring-up steps.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5352
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stop RoCE and all workers, detach the netdev,
 * and issue a function reset so the hardware quits DMA before the
 * system powers off or kexecs.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Can be NULL if be_probe() failed before pci_set_drvdata() */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5373
/* AER/EEH error-detected callback: quiesce the device on the first
 * notification (close interface under rtnl, clear driver state), then
 * tell the PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Quiesce only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5412
/* PCI Error Recovery callback: the slot has been reset.  Re-enable and
 * re-initialize PCI state, then wait for the adapter FW to become ready
 * before reporting the device as recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Restore bus mastering, power state and the config space that
	 * was saved before the reset.
	 */
	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any latched AER status so a stale error is not re-reported */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5439
/* PCI Error Recovery callback: recovery succeeded; bring the adapter
 * back to full operation (reset function, re-init FW, re-create rings,
 * reopen the netdev and restart the recovery worker).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Save the restored config space for any future reset/resume */
	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	/* Reopen only if the interface was up when the error hit */
	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic recovery worker cancelled at error time */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5482
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5488
/* PCI driver entry points for all supported BE2/BE3/Lancer/Skyhawk IDs */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5499
5500static int __init be_init_module(void)
5501{
Joe Perches8e95a202009-12-03 07:58:21 +00005502 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5503 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005504 printk(KERN_WARNING DRV_NAME
5505 " : Module param rx_frag_size must be 2048/4096/8192."
5506 " Using 2048\n");
5507 rx_frag_size = 2048;
5508 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005509
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005510 return pci_register_driver(&be_driver);
5511}
5512module_init(be_init_module);
5513
/* Module exit point: unregister the PCI driver (the core then invokes
 * be_remove() for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);