blob: a75eb74872406488e5e718629a17b348b397aa33 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
/* PCI device IDs (BladeEngine and OneConnect families) claimed by this
 * driver; the zeroed entry terminates the table.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the UE (unrecoverable error) status low register.
 * NOTE(review): presumably indexed by bit position within the CSR — confirm
 * at the decode site. Trailing spaces in the strings are intentional data;
 * do not trim them.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR */
/* Human-readable names for the UE status high register; companion table to
 * ue_status_low_desc above. NOTE(review): presumably indexed by bit position
 * within the CSR — confirm at the decode site.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
/* ndo_set_mac_address handler.
 * Programs the requested MAC via the FW PMAC_ADD cmd, deletes the previously
 * programmed MAC, then confirms the change by reading the active MAC back
 * from the FW before committing it to the netdev.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or a
 * negative error (e.g. -EPERM) when the FW did not activate the new MAC.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	/* curr_pmac_id stays 0 if PMAC_ADD fails; the get_active_mac query
	 * below then decides whether the PF pre-provisioned the MAC
	 */
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
/* Copy the v0 (BE2) HW stats layout into the driver's generic stats
 * struct, converting from LE to CPU byte order first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW counts address- and vlan-filtered drops separately;
	 * the driver reports their sum
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the v0 layout */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
393
/* Copy the v1 (BE3) HW stats layout into the driver's generic stats
 * struct, converting from LE to CPU byte order first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
439
/* Copy the v2 (post-BE3) HW stats layout into the driver's generic stats
 * struct, converting from LE to CPU byte order first. v2 additionally
 * carries RoCE counters, copied only when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
493
/* Copy the Lancer pport stats layout into the driver's generic stats
 * struct, converting from LE to CPU byte order first. Several Lancer
 * counters are 64-bit; only the low 32 bits (*_lo) are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filtered drops separately;
	 * the driver reports their sum
	 */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000556void be_parse_stats(struct be_adapter *adapter)
557{
Ajit Khaparde61000862013-10-03 16:16:33 -0500558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559 struct be_rx_obj *rxo;
560 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000561 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000562
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000565 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else if (BE3_chip(adapter))
569 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 else
572 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573
Ajit Khaparde61000862013-10-03 16:16:33 -0500574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000578 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000579 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000580}
581
/* ndo_get_stats64() handler.
 * Aggregates the per-RX/TX-queue software counters and the driver-cached
 * HW error counters (adapter->drv_stats) into @stats.
 * Per-queue 64-bit counters are sampled inside a
 * u64_stats_fetch_begin_irq()/retry loop so a consistent snapshot is read
 * even on 32-bit hosts where 64-bit loads are not atomic.
 * Returns @stats, as the ndo contract requires.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until a consistent (pkts, bytes) pair is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666{
Sathya Perla3c8def92011-06-12 20:01:58 +0000667 struct be_tx_stats *stats = tx_stats(txo);
668
Sathya Perlaab1594e2011-07-25 19:10:15 +0000669 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000670 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671 stats->tx_bytes += skb->len;
672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500676/* Returns number of WRBs needed for the skb */
677static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500679 /* +1 for the header wrb */
680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681}
682
683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
684{
685 wrb->frag_pa_hi = upper_32_bits(addr);
686 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
687 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000688 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689}
690
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000691static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530692 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000693{
694 u8 vlan_prio;
695 u16 vlan_tag;
696
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100697 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000698 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
699 /* If vlan priority provided by OS is NOT in available bmap */
700 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
701 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
702 adapter->recommended_prio;
703
704 return vlan_tag;
705}
706
Sathya Perlac9c47142014-03-27 10:46:19 +0530707/* Used only for IP tunnel packets */
708static u16 skb_inner_ip_proto(struct sk_buff *skb)
709{
710 return (inner_ip_hdr(skb)->version == 4) ?
711 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
712}
713
714static u16 skb_ip_proto(struct sk_buff *skb)
715{
716 return (ip_hdr(skb)->version == 4) ?
717 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
718}
719
/* Populate the TX header WRB for @skb: offload flags (LSO / checksum),
 * VLAN tag, total frame length (@len) and the number of WRBs used for
 * this request including the header itself (@wrb_cnt).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is set for TCPv6 GSO, except on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* for tunnelled pkts the inner L4 protocol decides the
		 * checksum bits; ipcs additionally requests outer IP csum
		 */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
763
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000764static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530765 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000766{
767 dma_addr_t dma;
768
769 be_dws_le_to_cpu(wrb, sizeof(*wrb));
770
771 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000772 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000773 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774 dma_unmap_single(dev, dma, wrb->frag_len,
775 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000776 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000777 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000778 }
779}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780
/* DMA-map @skb and post its header + data WRBs on @txo's queue.
 * Returns the number of WRBs used up by the skb, or 0 on DMA-mapping
 * failure, in which case the queue is unwound back to its original state.
 * Note: the doorbell is NOT rung here; the caller flushes via
 * be_xmit_flush().
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;	/* saved for bookkeeping and error unwind */

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	/* map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* remember the skb (keyed by the hdr WRB slot) for completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked: unmap everything mapped so far, then
	 * restore the saved head pointer.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first data WRB may be a single-mapping */
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}
856
/* Non-zero once the FW's QnQ async event has been received; until then
 * the "skip HW VLAN tagging" workaround cannot be relied upon (see
 * be_lancer_xmit_workarounds()).
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
861
/* Insert VLAN tag(s) into @skb in software, as a workaround for HW
 * VLAN-tagging bugs: the tx tag (or the pvid, in QnQ mode) first, then
 * the outer QnQ tag if one is configured. *skip_hw_vlan (if non-NULL)
 * is set to tell the caller/FW to skip HW VLAN insertion for this pkt.
 * Returns the (possibly reallocated) skb, or NULL if tag insertion or
 * skb_share_check() failed (the skb is freed by the helpers on failure).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* get a private copy before modifying the pkt data */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkts get the port VLAN id in QnQ mode */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now inline in the pkt; clear the out-of-band TCI */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
906
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000907static bool be_ipv6_exthdr_check(struct sk_buff *skb)
908{
909 struct ethhdr *eh = (struct ethhdr *)skb->data;
910 u16 offset = ETH_HLEN;
911
912 if (eh->h_proto == htons(ETH_P_IPV6)) {
913 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
914
915 offset += sizeof(struct ipv6hdr);
916 if (ip6h->nexthdr != NEXTHDR_TCP &&
917 ip6h->nexthdr != NEXTHDR_UDP) {
918 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530919 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000920
921 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
922 if (ehdr->hdrlen == 0xff)
923 return true;
924 }
925 }
926 return false;
927}
928
929static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
930{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100931 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000932}
933
Sathya Perla748b5392014-05-09 13:29:13 +0530934static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000935{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000936 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000937}
938
/* Apply BEx/Lancer HW-bug workarounds to @skb before transmit: trim the
 * padding off short IPv4 pkts, insert VLAN tags in SW where HW tagging
 * is buggy, and drop IPv6 pkts that would lock up the ASIC.
 * May set *skip_hw_vlan to ask the FW to skip HW VLAN insertion.
 * Returns the (possibly reallocated) skb, or NULL if the pkt was dropped
 * or tag insertion failed; in either case the skb has been freed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the pkt down to the IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1006
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301007static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1008 struct sk_buff *skb,
1009 bool *skip_hw_vlan)
1010{
1011 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1012 * less may cause a transmit stall on that port. So the work-around is
1013 * to pad short packets (<= 32 bytes) to a 36-byte length.
1014 */
1015 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001016 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301017 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301018 }
1019
1020 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1021 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1022 if (!skb)
1023 return NULL;
1024 }
1025
1026 return skb;
1027}
1028
/* Ring the TX doorbell for all WRBs pending on @txo and reset the
 * pending count. The last request's header is made eventable so a
 * completion is raised, and on non-Lancer chips a dummy WRB is appended
 * when the pending count is odd (the existing hack keeps the count even;
 * the header's num_wrb field is bumped to cover the dummy).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* rewrite num_wrb in the last header to include the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1052
/* ndo_start_xmit() handler: applies HW workarounds, enqueues the skb's
 * WRBs and rings the doorbell when no further pkts are expected
 * (!skb->xmit_more) or the subqueue has just been stopped.
 * Always returns NETDEV_TX_OK; failed pkts are dropped and counted.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	/* NULL means the pkt was dropped/freed by the workaround code */
	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	/* 0 means DMA mapping failed; the queue was already unwound */
	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the subqueue if a max-fragment pkt may no longer fit */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1089
1090static int be_change_mtu(struct net_device *netdev, int new_mtu)
1091{
1092 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301093 struct device *dev = &adapter->pdev->dev;
1094
1095 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1096 dev_info(dev, "MTU must be between %d and %d bytes\n",
1097 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098 return -EINVAL;
1099 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301100
1101 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301102 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001103 netdev->mtu = new_mtu;
1104 return 0;
1105}
1106
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* Program the HW VLAN filter with every vid set in adapter->vids.
 * Falls back to VLAN promiscuous mode when the configured vids exceed
 * what the HW supports or the FW reports insufficient resources; clears
 * promiscuous mode again once filtering succeeds.
 * Returns 0 on success or a FW command status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
		/* filtering works again; leave VLAN promiscuous mode */
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS,
					  OFF);
		if (!status) {
			dev_info(dev, "Disabling VLAN Promiscuous mode\n");
			adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promiscuous mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1159
Patrick McHardy80d5c362013-04-19 02:04:28 +00001160static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001161{
1162 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001163 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001165 /* Packets with VID 0 are always received by Lancer by default */
1166 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301167 return status;
1168
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301169 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301170 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001171
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301172 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301173 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001174
Somnath Kotura6b74e02014-01-21 15:50:55 +05301175 status = be_vid_config(adapter);
1176 if (status) {
1177 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301178 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301179 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301180
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001181 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182}
1183
Patrick McHardy80d5c362013-04-19 02:04:28 +00001184static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001185{
1186 struct be_adapter *adapter = netdev_priv(netdev);
1187
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001188 /* Packets with VID 0 are always received by Lancer by default */
1189 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301190 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001191
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301192 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301193 adapter->vlans_added--;
1194
1195 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001196}
1197
Somnath kotur7ad09452014-03-03 14:24:43 +05301198static void be_clear_promisc(struct be_adapter *adapter)
1199{
1200 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301201 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301202
Sathya Perlaac34b742015-02-06 08:18:40 -05001203 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Somnath kotur7ad09452014-03-03 14:24:43 +05301204}
1205
/* ndo_set_rx_mode() handler: sync the HW RX filters (promiscuous,
 * multicast, secondary unicast MACs) with the netdev flags and address
 * lists. Falls back to full or multicast promiscuous mode when the
 * configured addresses exceed what the interface supports.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* restore the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* uc list changed: delete all secondary pmacs, then re-add */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many uc addresses for HW filtering; go promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS,
					 ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status) {
		/* mc programming succeeded; mcast promisc no longer needed */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1273
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001274static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1275{
1276 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001277 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001278 int status;
1279
Sathya Perla11ac75e2011-12-13 00:58:50 +00001280 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001281 return -EPERM;
1282
Sathya Perla11ac75e2011-12-13 00:58:50 +00001283 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001284 return -EINVAL;
1285
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301286 /* Proceed further only if user provided MAC is different
1287 * from active MAC
1288 */
1289 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1290 return 0;
1291
Sathya Perla3175d8c2013-07-23 15:25:03 +05301292 if (BEx_chip(adapter)) {
1293 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1294 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001295
Sathya Perla11ac75e2011-12-13 00:58:50 +00001296 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1297 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301298 } else {
1299 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1300 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001301 }
1302
Kalesh APabccf232014-07-17 16:20:24 +05301303 if (status) {
1304 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1305 mac, vf, status);
1306 return be_cmd_status(status);
1307 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001308
Kalesh APabccf232014-07-17 16:20:24 +05301309 ether_addr_copy(vf_cfg->mac_addr, mac);
1310
1311 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001312}
1313
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001314static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301315 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001316{
1317 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001318 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319
Sathya Perla11ac75e2011-12-13 00:58:50 +00001320 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001321 return -EPERM;
1322
Sathya Perla11ac75e2011-12-13 00:58:50 +00001323 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001324 return -EINVAL;
1325
1326 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001327 vi->max_tx_rate = vf_cfg->tx_rate;
1328 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001329 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1330 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001331 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301332 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001333
1334 return 0;
1335}
1336
Sathya Perla748b5392014-05-09 13:29:13 +05301337static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001338{
1339 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001340 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001341 int status = 0;
1342
Sathya Perla11ac75e2011-12-13 00:58:50 +00001343 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001344 return -EPERM;
1345
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001346 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001347 return -EINVAL;
1348
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001349 if (vlan || qos) {
1350 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301351 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001352 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1353 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001354 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001355 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301356 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1357 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001358 }
1359
Kalesh APabccf232014-07-17 16:20:24 +05301360 if (status) {
1361 dev_err(&adapter->pdev->dev,
1362 "VLAN %d config on VF %d failed : %#x\n", vlan,
1363 vf, status);
1364 return be_cmd_status(status);
1365 }
1366
1367 vf_cfg->vlan_tag = vlan;
1368
1369 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001370}
1371
/* ndo_set_vf_rate handler: cap the TX rate of VF @vf at @max_tx_rate Mbps
 * via the FW QOS cmd. min_tx_rate is not supported by this HW and must be
 * 0. A @max_tx_rate of 0 is passed straight to the FW with link_speed 0
 * (presumably clearing the cap -- the validation path is skipped).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* a minimum rate guarantee cannot be offered */
	if (min_tx_rate)
		return -EINVAL;

	/* rate 0: skip link-speed validation entirely */
	if (!max_tx_rate)
		goto config_qos;

	/* the requested cap is validated against the current link speed */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* cache the cap only after the FW accepted it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301433
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301434static int be_set_vf_link_state(struct net_device *netdev, int vf,
1435 int link_state)
1436{
1437 struct be_adapter *adapter = netdev_priv(netdev);
1438 int status;
1439
1440 if (!sriov_enabled(adapter))
1441 return -EPERM;
1442
1443 if (vf >= adapter->num_vfs)
1444 return -EINVAL;
1445
1446 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301447 if (status) {
1448 dev_err(&adapter->pdev->dev,
1449 "Link state change on VF %d failed: %#x\n", vf, status);
1450 return be_cmd_status(status);
1451 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301452
Kalesh APabccf232014-07-17 16:20:24 +05301453 adapter->vf_cfg[vf].plink_tracking = link_state;
1454
1455 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301456}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001457
Sathya Perla2632baf2013-10-01 16:00:00 +05301458static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1459 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460{
Sathya Perla2632baf2013-10-01 16:00:00 +05301461 aic->rx_pkts_prev = rx_pkts;
1462 aic->tx_reqs_prev = tx_pkts;
1463 aic->jiffies = now;
1464}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001465
/* Adaptive interrupt coalescing: recompute the per-EQ interrupt delay from
 * the rx/tx pkt rates observed since the previous run, and push all changed
 * delays to the FW in a single modify-eqd cmd.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		/* AIC disabled for this EQ: fall back to the static
		 * (ethtool-set) delay and clear the baseline timestamp.
		 */
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* read the 64-bit counters consistently via the seqcount */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* pkts-per-second over the measurement interval -> delay */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* clamp: no delay at very low rates, else within [min, max] */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* queue a FW update only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* one FW cmd covers all EQs whose delay changed */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1531
Sathya Perla3abcded2010-10-03 22:12:27 -07001532static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301533 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001534{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001535 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001536
Sathya Perlaab1594e2011-07-25 19:10:15 +00001537 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001538 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001539 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001540 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001541 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001542 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001543 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001544 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001545 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546}
1547
Sathya Perla2e588f82011-03-11 02:49:26 +00001548static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001549{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001550 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301551 * Also ignore ipcksm for ipv6 pkts
1552 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001553 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301554 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001555}
1556
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301557static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001559 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001560 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001561 struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301562 u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563
Sathya Perla3abcded2010-10-03 22:12:27 -07001564 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565 BUG_ON(!rx_page_info->page);
1566
Sathya Perlae50287b2014-03-04 12:14:38 +05301567 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001568 dma_unmap_page(&adapter->pdev->dev,
1569 dma_unmap_addr(rx_page_info, bus),
1570 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05301571 rx_page_info->last_frag = false;
1572 } else {
1573 dma_sync_single_for_cpu(&adapter->pdev->dev,
1574 dma_unmap_addr(rx_page_info, bus),
1575 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001576 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301578 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579 atomic_dec(&rxq->used);
1580 return rx_page_info;
1581}
1582
1583/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001584static void be_rx_compl_discard(struct be_rx_obj *rxo,
1585 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001588 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001590 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301591 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001592 put_page(page_info->page);
1593 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001594 }
1595}
1596
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: a small packet is copied entirely into the skb
 * linear area; a larger one gets its header copied and the rest attached
 * as page frags, coalescing frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the Ethernet header; the payload stays in the
		 * page and is attached as frag 0
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* ownership of the page moved to the skb (or was dropped above) */
	page_info->page = NULL;

	/* single-frag packet: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1671
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001672/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301673static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001674 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001676 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001677 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001679
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001680 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001681 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001682 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001683 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684 return;
1685 }
1686
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001687 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001688
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001689 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001690 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001691 else
1692 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001694 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001695 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001696 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001697 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301698
Tom Herbertb6c0e892014-08-27 21:27:17 -07001699 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301700 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701
Jiri Pirko343e43c2011-08-25 02:50:51 +00001702 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001703 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001704
1705 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001706}
1707
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the completion's pages as frags to the napi skb and pass it
 * through napi_gro_frags(), coalescing frags from the same page.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no skb available from napi: drop the completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration always opens frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for csum-verified pkts */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1765
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001766static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1767 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001768{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301769 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1770 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1771 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1772 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1773 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1774 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1775 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1776 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1777 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1778 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1779 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001780 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301781 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1782 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001783 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301784 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301785 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301786 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001787}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001788
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001789static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1790 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001791{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301792 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1793 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1794 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1795 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1796 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1797 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1798 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1799 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1800 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1801 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1802 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001803 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301804 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1805 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001806 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301807 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1808 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001809}
1810
/* Fetch and parse the next valid RX completion from the completion queue.
 * Returns NULL when no new completion is present. The parsed result lives
 * in rxo->rxcp (one at a time per ring); the h/w descriptor's valid bit is
 * cleared before advancing the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read barrier: don't consume the descriptor words until the
	 * valid bit has been observed set
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* l4 csum is not verified by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* on non-lancer chips the tag arrives byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* pvid-tagged pkts that the host didn't ask for are treated
		 * as untagged
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1855
Eric Dumazet1829b082011-03-01 05:48:12 +00001856static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001859
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001861 gfp |= __GFP_COMP;
1862 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863}
1864
1865/*
1866 * Allocate a page, split it to fragments of size rx_frag_size and post as
1867 * receive buffers to BE
1868 */
/* Post up to @frags_needed rx-frag buffers to the RXQ, stopping early if
 * the ring is full or allocation/DMA-mapping fails. Big pages are carved
 * into rx_frag_size slices; the DMA mapping is recorded on the last frag
 * of each page so it can be unmapped once when the page is fully consumed.
 * HW is notified in batches of at most 256 posted entries.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* A non-NULL page_info->page means the slot is still owned by HW;
	 * stop posting when we catch up with unconsumed buffers.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Each frag holds its own reference on the page */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks; presumably the doorbell field is
		 * limited to 8 bits, hence the 256 cap — TODO confirm
		 */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1947
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * pending. The entry is converted to CPU endianness, invalidated so it is
 * not seen again, and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the compl body only after the valid bit has been observed */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate so this entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1963
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * every wrb and freeing the skbs that were recorded at their hdr-wrb slot.
 * Returns the number of wrbs consumed; the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL sent_skbs entry marks the hdr wrb of a request */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag after the hdr wrb is the linear skb header;
		 * unmap it only if it has a non-zero length
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
1997
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001998/* Return the number of events in the event queue */
1999static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002000{
2001 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002002 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002003
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002004 do {
2005 eqe = queue_tail_node(&eqo->q);
2006 if (eqe->evt == 0)
2007 break;
2008
2009 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002010 eqe->evt = 0;
2011 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002012 queue_tail_inc(&eqo->q);
2013 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002014
2015 return num;
2016}
2017
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002018/* Leaves the EQ is disarmed state */
2019static void be_eq_clean(struct be_eq_obj *eqo)
2020{
2021 int num = events_get(eqo);
2022
2023 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2024}
2025
/* Drain the RX CQ and free all posted-but-unused rx buffers, leaving the
 * CQ unarmed and the RXQ indices reset to zero. Called during teardown.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2075
/* Drain TX completions for all TXQs during teardown. First polls the CQs
 * until HW has been silent for ~10ms (or a HW error is seen), then frees
 * any wrbs that were enqueued but never notified to HW, resetting the TXQ
 * indices back to the last-notified position.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the silence timer */
				timeo = 0;
			}
			/* Only un-notified wrbs remain => this txq is done */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2140
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002141static void be_evt_queues_destroy(struct be_adapter *adapter)
2142{
2143 struct be_eq_obj *eqo;
2144 int i;
2145
2146 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002147 if (eqo->q.created) {
2148 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002149 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302150 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302151 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002152 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002153 be_queue_free(adapter, &eqo->q);
2154 }
2155}
2156
2157static int be_evt_queues_create(struct be_adapter *adapter)
2158{
2159 struct be_queue_info *eq;
2160 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302161 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002162 int i, rc;
2163
Sathya Perla92bf14a2013-08-27 16:57:32 +05302164 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2165 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002166
2167 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302168 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2169 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302170 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302171 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002172 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002173 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302174 aic->max_eqd = BE_MAX_EQD;
2175 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002176
2177 eq = &eqo->q;
2178 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302179 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002180 if (rc)
2181 return rc;
2182
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302183 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002184 if (rc)
2185 return rc;
2186 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002187 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002188}
2189
Sathya Perla5fb379e2009-06-18 00:02:59 +00002190static void be_mcc_queues_destroy(struct be_adapter *adapter)
2191{
2192 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002193
Sathya Perla8788fdc2009-07-27 22:52:03 +00002194 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002195 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002196 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002197 be_queue_free(adapter, q);
2198
Sathya Perla8788fdc2009-07-27 22:52:03 +00002199 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002200 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002201 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002202 be_queue_free(adapter, q);
2203}
2204
2205/* Must be called only after TX qs are created as MCC shares TX EQ */
2206static int be_mcc_queues_create(struct be_adapter *adapter)
2207{
2208 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002209
Sathya Perla8788fdc2009-07-27 22:52:03 +00002210 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002211 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302212 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002213 goto err;
2214
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002215 /* Use the default EQ for MCC completions */
2216 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002217 goto mcc_cq_free;
2218
Sathya Perla8788fdc2009-07-27 22:52:03 +00002219 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002220 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2221 goto mcc_cq_destroy;
2222
Sathya Perla8788fdc2009-07-27 22:52:03 +00002223 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002224 goto mcc_q_free;
2225
2226 return 0;
2227
2228mcc_q_free:
2229 be_queue_free(adapter, q);
2230mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002231 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002232mcc_cq_free:
2233 be_queue_free(adapter, cq);
2234err:
2235 return -1;
2236}
2237
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238static void be_tx_queues_destroy(struct be_adapter *adapter)
2239{
2240 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002241 struct be_tx_obj *txo;
2242 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243
Sathya Perla3c8def92011-06-12 20:01:58 +00002244 for_all_tx_queues(adapter, txo, i) {
2245 q = &txo->q;
2246 if (q->created)
2247 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2248 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249
Sathya Perla3c8def92011-06-12 20:01:58 +00002250 q = &txo->cq;
2251 if (q->created)
2252 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2253 be_queue_free(adapter, q);
2254 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255}
2256
Sathya Perla77071332013-08-27 16:57:34 +05302257static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002259 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002260 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302261 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002262
Sathya Perla92bf14a2013-08-27 16:57:32 +05302263 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002264
Sathya Perla3c8def92011-06-12 20:01:58 +00002265 for_all_tx_queues(adapter, txo, i) {
2266 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002267 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2268 sizeof(struct be_eth_tx_compl));
2269 if (status)
2270 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271
John Stultz827da442013-10-07 15:51:58 -07002272 u64_stats_init(&txo->stats.sync);
2273 u64_stats_init(&txo->stats.sync_compl);
2274
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002275 /* If num_evt_qs is less than num_tx_qs, then more than
2276 * one txq share an eq
2277 */
2278 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2279 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2280 if (status)
2281 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002282
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002283 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2284 sizeof(struct be_eth_wrb));
2285 if (status)
2286 return status;
2287
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002288 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002289 if (status)
2290 return status;
2291 }
2292
Sathya Perlad3791422012-09-28 04:39:44 +00002293 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2294 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002295 return 0;
2296}
2297
2298static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299{
2300 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002301 struct be_rx_obj *rxo;
2302 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002303
Sathya Perla3abcded2010-10-03 22:12:27 -07002304 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002305 q = &rxo->cq;
2306 if (q->created)
2307 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2308 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002309 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310}
2311
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002312static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002313{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002314 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002315 struct be_rx_obj *rxo;
2316 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002317
Sathya Perla92bf14a2013-08-27 16:57:32 +05302318 /* We can create as many RSS rings as there are EQs. */
2319 adapter->num_rx_qs = adapter->num_evt_qs;
2320
2321 /* We'll use RSS only if atleast 2 RSS rings are supported.
2322 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002323 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302324 if (adapter->num_rx_qs > 1)
2325 adapter->num_rx_qs++;
2326
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002327 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002328 for_all_rx_queues(adapter, rxo, i) {
2329 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002330 cq = &rxo->cq;
2331 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302332 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002333 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002334 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002335
John Stultz827da442013-10-07 15:51:58 -07002336 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002337 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2338 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002339 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002340 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002341 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002342
Sathya Perlad3791422012-09-28 04:39:44 +00002343 dev_info(&adapter->pdev->dev,
2344 "created %d RSS queue(s) and 1 default RX queue\n",
2345 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002346 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002347}
2348
/* Legacy INTx interrupt handler: counts pending events, schedules NAPI,
 * and tracks spurious interrupts so the kernel does not disable the line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2380
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002381static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002382{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002383 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002384
Sathya Perla0b545a62012-11-23 00:27:18 +00002385 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2386 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002387 return IRQ_HANDLED;
2388}
2389
Sathya Perla2e588f82011-03-11 02:49:26 +00002390static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002391{
Somnath Koture38b1702013-05-29 22:55:56 +00002392 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002393}
2394
/* NAPI RX processing: consume up to @budget completions from the rx-obj's
 * CQ, delivering packets via GRO or the regular path, and replenish the
 * RXQ when it runs low. @polling distinguishes busy-poll from NAPI poll.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2454
Kalesh AP512bb8a2014-09-02 09:56:49 +05302455static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2456{
2457 switch (status) {
2458 case BE_TX_COMP_HDR_PARSE_ERR:
2459 tx_stats(txo)->tx_hdr_parse_err++;
2460 break;
2461 case BE_TX_COMP_NDMA_ERR:
2462 tx_stats(txo)->tx_dma_err++;
2463 break;
2464 case BE_TX_COMP_ACL_ERR:
2465 tx_stats(txo)->tx_spoof_check_err++;
2466 break;
2467 }
2468}
2469
2470static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2471{
2472 switch (status) {
2473 case LANCER_TX_COMP_LSO_ERR:
2474 tx_stats(txo)->tx_tso_err++;
2475 break;
2476 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2477 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2478 tx_stats(txo)->tx_spoof_check_err++;
2479 break;
2480 case LANCER_TX_COMP_QINQ_ERR:
2481 tx_stats(txo)->tx_qinq_err++;
2482 break;
2483 case LANCER_TX_COMP_PARITY_ERR:
2484 tx_stats(txo)->tx_internal_parity_err++;
2485 break;
2486 case LANCER_TX_COMP_DMA_ERR:
2487 tx_stats(txo)->tx_dma_err++;
2488 break;
2489 }
2490}
2491
/* Reap Tx completions on txo's CQ: free transmitted wrbs, account
 * per-queue error/completion stats, re-arm the CQ, and wake netdev
 * tx-queue @idx if it had been stopped and the WRB queue has drained
 * below half full.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			/* error codes are decoded differently on Lancer */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		/* re-arm the CQ and return credit for consumed entries */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002530
Sathya Perlaf7062ee2015-02-06 08:18:35 -05002531#ifdef CONFIG_NET_RX_BUSY_POLL
2532static inline bool be_lock_napi(struct be_eq_obj *eqo)
2533{
2534 bool status = true;
2535
2536 spin_lock(&eqo->lock); /* BH is already disabled */
2537 if (eqo->state & BE_EQ_LOCKED) {
2538 WARN_ON(eqo->state & BE_EQ_NAPI);
2539 eqo->state |= BE_EQ_NAPI_YIELD;
2540 status = false;
2541 } else {
2542 eqo->state = BE_EQ_NAPI;
2543 }
2544 spin_unlock(&eqo->lock);
2545 return status;
2546}
2547
/* Release NAPI ownership taken by be_lock_napi().  Busy-poll cannot be
 * active or yielding while NAPI holds the EQ, hence the WARN_ON.
 */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}
2557
2558static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2559{
2560 bool status = true;
2561
2562 spin_lock_bh(&eqo->lock);
2563 if (eqo->state & BE_EQ_LOCKED) {
2564 eqo->state |= BE_EQ_POLL_YIELD;
2565 status = false;
2566 } else {
2567 eqo->state |= BE_EQ_POLL;
2568 }
2569 spin_unlock_bh(&eqo->lock);
2570 return status;
2571}
2572
/* Release busy-poll ownership taken by be_lock_busy_poll().  NAPI must
 * not be marked as owner while busy-poll holds the EQ.
 */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}
2582
/* Initialize the EQ's NAPI/busy-poll arbitration lock and state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}
2588
/* Quiesce busy-polling on this EQ before teardown.  Spins until the
 * NAPI lock is acquired; the EQ is then left in the NAPI-owned state,
 * which keeps be_busy_poll() out permanently.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}
2601
2602#else /* CONFIG_NET_RX_BUSY_POLL */
2603
/* Stubs used when CONFIG_NET_RX_BUSY_POLL is disabled: NAPI always gets
 * the EQ (be_lock_napi() returns true), busy-polling never runs
 * (be_lock_busy_poll() returns false), and the remaining hooks are
 * no-ops.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
2629#endif /* CONFIG_NET_RX_BUSY_POLL */
2630
/* NAPI poll handler for one EQ: reap Tx completions on all Tx queues of
 * this EQ, then (if busy-poll is not holding the EQ) process up to
 * @budget Rx completions per RXQ.  Completes NAPI and re-arms the EQ
 * only when the budget was not exhausted; otherwise events are counted
 * and cleared without re-arming so polling continues.
 * Returns the amount of Rx work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ; claim the full budget so NAPI
		 * keeps polling and retries shortly
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2670
Sathya Perla6384a4d2013-10-25 10:40:16 +05302671#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) handler: process up to 4 Rx completions per
 * RXQ on this EQ without interrupts, stopping at the first RXQ that
 * yields any work.  Returns LL_FLUSH_BUSY if NAPI currently owns the
 * EQ, else the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
2691#endif
2692
/* Check the adapter for fatal errors.  On Lancer the SLIPORT registers
 * are read via the doorbell BAR; on BE/Skyhawk the UE (unrecoverable
 * error) status words are read from PCI config space and filtered by
 * their mask registers.  Marks adapter->hw_error where appropriate and
 * drops the carrier if a real error was detected.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* error already recorded earlier; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* log the name of every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2768
Sathya Perla8d56ff12009-11-22 22:02:26 +00002769static void be_msix_disable(struct be_adapter *adapter)
2770{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002771 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002772 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002773 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302774 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002775 }
2776}
2777
/* Enable MSI-X with as many vectors as the configuration allows
 * (at least MIN_MSIX_VECTORS).  When RoCE is supported, half of the
 * granted vectors are reserved for RoCE.  Returns 0 on success; on
 * failure returns 0 for PFs (INTx fallback is possible) and the
 * pci_enable_msix_range() error for VFs (which have no INTx).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* num_vec now becomes the number of vectors actually granted */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2821
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002822static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302823 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002824{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302825 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002826}
2827
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs already requested (in reverse order),
 * disables MSI-X and returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free the IRQs acquired before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2851
/* Register interrupts: prefer MSI-X; fall back to shared INTx on the
 * PF if MSI-X registration fails (VFs have no INTx, so they fail hard).
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2879
2880static void be_irq_unregister(struct be_adapter *adapter)
2881{
2882 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002883 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002884 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002885
2886 if (!adapter->isr_registered)
2887 return;
2888
2889 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002890 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002891 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002892 goto done;
2893 }
2894
2895 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002896 for_all_evt_queues(adapter, eqo, i)
2897 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002898
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002899done:
2900 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002901}
2902
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002903static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002904{
2905 struct be_queue_info *q;
2906 struct be_rx_obj *rxo;
2907 int i;
2908
2909 for_all_rx_queues(adapter, rxo, i) {
2910 q = &rxo->q;
2911 if (q->created) {
2912 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002913 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002914 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002915 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002916 }
2917}
2918
/* ndo_stop: quiesce the interface.  Order matters: stop RoCE and NAPI,
 * silence async MCC events, stop the Tx path and drain its completions,
 * destroy RX queues, drop the extra unicast MACs, clean the EQs (after
 * making sure their IRQ handlers have finished), then free the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* delete the additional unicast MACs (entry 0 is the primary MAC) */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure no handler is running before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2968
/* Allocate and create all RX queues, program the RSS indirection table
 * and hash key (or disable RSS with a single RXQ), and post the initial
 * batch of receive buffers.  Returns 0 or a negative/FW error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table round-robin across the RSS
		 * queues (the default RXQ is not part of the RSS set)
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only beyond the BEx family */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* remember the key programmed into the FW */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3034
/* ndo_open: bring the interface up.  Creates RX queues, registers IRQs,
 * arms all CQs and EQs, enables NAPI and async MCC events, reports link
 * state and starts the Tx queues.  On any failure the partially set up
 * state is torn down via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm all Rx and Tx completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* learn about already-configured vxlan UDP ports */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3084
/* Enable or disable magic-packet Wake-on-LAN.  A DMA buffer is needed
 * for the FW command in both directions.  When enabling, PM control is
 * programmed and PCI wake is armed for D3hot/D3cold; when disabling,
 * an all-zero MAC is passed to the FW (presumably clearing the magic
 * filter -- NOTE(review): FW semantics not visible here) and PCI wake
 * is disarmed.  Returns 0 or the first error encountered.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			/* free the DMA buffer before bailing out */
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3124
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003125static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3126{
3127 u32 addr;
3128
3129 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3130
3131 mac[5] = (u8)(addr & 0xFF);
3132 mac[4] = (u8)((addr >> 8) & 0xFF);
3133 mac[3] = (u8)((addr >> 16) & 0xFF);
3134 /* Use the OUI from the current MAC address */
3135 memcpy(mac, adapter->netdev->dev_addr, 3);
3136}
3137
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a PMAC entry; later chips set the MAC
		 * directly on the VF's interface
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
3173
/* Read back each VF's currently-active MAC from the FW and cache it in
 * the VF's config.  Returns 0, or the first FW error encountered.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3190
/* Tear down SR-IOV: disable SR-IOV on the PCI device (skipped when VFs
 * are still attached to VMs), remove each VF's MAC and interface, and
 * free the per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx used a PMAC entry; later chips clear the MAC on the
		 * VF's interface directly
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3219
/* Destroy all data-path queues in the reverse order of their creation
 * in be_setup_queues(): MCC, RX CQs, TX queues, then event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3227
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303228static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003229{
Sathya Perla191eb752012-02-23 18:50:13 +00003230 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3231 cancel_delayed_work_sync(&adapter->work);
3232 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3233 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303234}
3235
Somnath Koturb05004a2013-12-05 12:08:16 +05303236static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303237{
3238 int i;
3239
Somnath Koturb05004a2013-12-05 12:08:16 +05303240 if (adapter->pmac_id) {
3241 for (i = 0; i < (adapter->uc_macs + 1); i++)
3242 be_cmd_pmac_del(adapter, adapter->if_handle,
3243 adapter->pmac_id[i], 0);
3244 adapter->uc_macs = 0;
3245
3246 kfree(adapter->pmac_id);
3247 adapter->pmac_id = NULL;
3248 }
3249}
3250
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: convert the tunnel interface back to normal
 * mode, clear the VxLAN port in FW, and strip the tunnel-offload feature
 * flags from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Remove the tunnel-segmentation offload bits advertised earlier */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303271
/* Undo everything be_setup() did: worker, VFs, VxLAN offloads, MAC
 * filters, the interface, the queues and MSI-x. Also used as the
 * be_setup() error path, so it tolerates partially initialized state.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3300
Kalesh AP0700d812015-01-20 03:51:43 -05003301static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3302 u32 cap_flags, u32 vf)
3303{
3304 u32 en_flags;
3305 int status;
3306
3307 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3308 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3309 BE_IF_FLAGS_RSS;
3310
3311 en_flags &= cap_flags;
3312
3313 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3314 if_handle, vf);
3315
3316 return status;
3317}
3318
/* Create one FW interface per VF. On non-BE3 chips a per-VF FW profile
 * may supply the interface capability flags; otherwise a minimal
 * untagged/broadcast/multicast set is used.
 * Returns 0 on success or the first failing status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			/* Profile query failure is non-fatal: fall back to
			 * the default cap_flags above.
			 */
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3346
/* Allocate the per-VF config array and mark each VF's if_handle and
 * pmac_id as invalid (-1) until they are actually created/queried.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3363
/* Bring up SR-IOV VFs. If VFs already exist from a previous enable
 * (old_vfs), re-query their interface handles and active MACs; otherwise
 * create interfaces and program MACs. Then grant each VF the FILTMGMT
 * privilege if missing, and for the fresh-enable case configure QoS and
 * link state and finally enable SR-IOV at the PCI layer.
 * On any failure all VF state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs survived from an earlier enable: reuse their
		 * existing if-handles and MACs instead of re-creating.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3438
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303439/* Converting function_mode bits on BE3 to SH mc_type enums */
3440
3441static u8 be_convert_mc_type(u32 function_mode)
3442{
Suresh Reddy66064db2014-06-23 16:41:29 +05303443 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303444 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303445 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303446 return FLEX10;
3447 else if (function_mode & VNIC_MODE)
3448 return vNIC2;
3449 else if (function_mode & UMC_ENABLED)
3450 return UMC;
3451 else
3452 return MC_NONE;
3453}
3454
/* On BE2/BE3 FW does not suggest the supported limits; derive the
 * per-function resource limits (uc/mc MACs, vlans, TX/RX/RSS/event
 * queues) locally from chip type, multi-channel mode and SR-IOV state.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One default (non-RSS) RX queue in addition to the RSS queues */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3522
Sathya Perla30128032011-11-10 19:17:57 +00003523static void be_setup_init(struct be_adapter *adapter)
3524{
3525 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003526 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003527 adapter->if_handle = -1;
3528 adapter->be3_native = false;
3529 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003530 if (be_physfn(adapter))
3531 adapter->cmd_privileges = MAX_PRIVILEGES;
3532 else
3533 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003534}
3535
/* Query the PF-pool SR-IOV limits from FW and validate the num_vfs
 * module parameter against them. If VFs are already enabled (e.g. from
 * a previous driver load) the existing VF count is kept and num_vfs is
 * ignored. Always returns 0 currently.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI config-space TotalVFs, capped */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3580
/* Populate adapter->res with per-function resource limits: computed
 * locally for BE2/BE3 (FW does not report them), queried from FW on
 * later chips. Half the event queues are reserved for RoCE when it may
 * be enabled. Logs the resulting limits.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3617
/* Query/validate the SR-IOV configuration and, when no VFs are enabled
 * yet, ask FW to redistribute the PF-pool resources across the requested
 * number of VFs. Failures are logged but not propagated.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3646
/* Query FW configuration, the active profile and (for PFs on non-BE2
 * chips) the SR-IOV settings; derive the adapter's resource limits and
 * allocate the pmac_id table sized by the max uc-MAC count.
 * Returns 0, a FW-command error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* SR-IOV config is skipped on BE2 and on VFs */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3680
/* Establish the primary MAC address: query the permanent MAC from FW if
 * the netdev has no address yet (fresh probe), otherwise re-program the
 * existing one. On BE3-R VFs the PF programs the initial MAC, so the
 * pmac_add step is skipped there.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3704
/* Start the periodic (1 second) worker and flag it as scheduled so
 * be_cancel_worker() knows there is work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3710
/* Create all data-path objects in dependency order: event queues, TX
 * queues, RX completion queues, MCC queues; then publish the real RX/TX
 * queue counts to the stack. The netif_set_real_num_*_queues() calls
 * require rtnl_lock to be held by the caller (see be_setup()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3745
/* Re-create all queues (e.g. after a queue-count change): close the
 * device if running, stop the worker, tear down the queues (and MSI-x,
 * unless vectors are shared with RoCE), then set everything up again
 * and re-open.
 * NOTE(review): callers appear to hold rtnl_lock, since be_setup_queues()
 * needs it for netif_set_real_num_*_queues() — confirm at call sites.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3781
/* Parse the leading major number out of a dotted FW version string
 * (e.g. "4.2.220.0" -> 4). Returns 0 when no number can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
3792
/* Main adapter initialization: query config/resources, enable MSI-x,
 * create the interface and all queues, program the MAC, apply vlan/
 * rx-mode/flow-control settings, set up VFs if requested and start the
 * periodic worker. On failure everything is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Old BE2 FW (< 4.0) has known interrupt issues; warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Program flow control; fall back to reading the FW defaults if
	 * the set command fails.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3872
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: ring each event queue's doorbell and schedule its NAPI
 * context directly, since normal interrupt delivery is unavailable.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3886
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303887static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003888
Sathya Perla306f1342011-08-02 19:57:45 +00003889static bool phy_flashing_required(struct be_adapter *adapter)
3890{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05003891 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003892 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003893}
3894
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003895static bool is_comp_in_ufi(struct be_adapter *adapter,
3896 struct flash_section_info *fsec, int type)
3897{
3898 int i = 0, img_type = 0;
3899 struct flash_section_info_g2 *fsec_g2 = NULL;
3900
Sathya Perlaca34fe32012-11-06 17:48:56 +00003901 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003902 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3903
3904 for (i = 0; i < MAX_FLASH_COMP; i++) {
3905 if (fsec_g2)
3906 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3907 else
3908 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3909
3910 if (img_type == type)
3911 return true;
3912 }
3913 return false;
3914
3915}
3916
Jingoo Han4188e7d2013-08-05 18:02:02 +09003917static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303918 int header_size,
3919 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003920{
3921 struct flash_section_info *fsec = NULL;
3922 const u8 *p = fw->data;
3923
3924 p += header_size;
3925 while (p < (fw->data + fw->size)) {
3926 fsec = (struct flash_section_info *)p;
3927 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3928 return fsec;
3929 p += 32;
3930 }
3931 return NULL;
3932}
3933
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303934static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3935 u32 img_offset, u32 img_size, int hdr_size,
3936 u16 img_optype, bool *crc_match)
3937{
3938 u32 crc_offset;
3939 int status;
3940 u8 crc[4];
3941
Vasundhara Volam70a7b522015-02-06 08:18:39 -05003942 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
3943 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303944 if (status)
3945 return status;
3946
3947 crc_offset = hdr_size + img_offset + img_size - 4;
3948
3949 /* Skip flashing, if crc of flashed region matches */
3950 if (!memcmp(crc, p + crc_offset, 4))
3951 *crc_match = true;
3952 else
3953 *crc_match = false;
3954
3955 return status;
3956}
3957
/* Write one firmware image to flash in chunks of at most 32KB via the
 * DMA-able flash_cmd buffer. Intermediate chunks use a SAVE op; the
 * final chunk uses a FLASH op. PHY FW uses its own pair of op codes,
 * and for PHY FW an ILLEGAL_REQUEST status aborts the loop without
 * being reported as an error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
3998
/* For BE2, BE3 and BE3-R */
/* Flash a UFI image on BE2/BE3/BE3-R adapters. Walks a per-generation
 * table of known flash components, skips sections that are absent from
 * the UFI, not applicable, or already up to date (CRC match for redboot),
 * and flashes the rest with legacy OPTYPE-based addressing.
 * Returns 0 on success, -1 on a malformed image, or a command status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Component layout table for gen3 (BE3) UFI images:
	 * { flash offset, optype, max size, image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Component layout table for gen2 (BE2) UFI images */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* Pick the table and header size for this chip generation;
	 * gen2 images have no per-image headers.
	 */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components not present in this UFI file */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW flashing needs a minimum running FW version */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Redboot is only reflashed when its CRC differs from
		 * what is already on flash.
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Reject images whose section would run past the file end */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		/* img_offset 0: BEx uses OPTYPE-based (not offset) flashing */
		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4116
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304117static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4118{
4119 u32 img_type = le32_to_cpu(fsec_entry.type);
4120 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4121
4122 if (img_optype != 0xFFFF)
4123 return img_optype;
4124
4125 switch (img_type) {
4126 case IMAGE_FIRMWARE_iSCSI:
4127 img_optype = OPTYPE_ISCSI_ACTIVE;
4128 break;
4129 case IMAGE_BOOT_CODE:
4130 img_optype = OPTYPE_REDBOOT;
4131 break;
4132 case IMAGE_OPTION_ROM_ISCSI:
4133 img_optype = OPTYPE_BIOS;
4134 break;
4135 case IMAGE_OPTION_ROM_PXE:
4136 img_optype = OPTYPE_PXE_BIOS;
4137 break;
4138 case IMAGE_OPTION_ROM_FCoE:
4139 img_optype = OPTYPE_FCOE_BIOS;
4140 break;
4141 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4142 img_optype = OPTYPE_ISCSI_BACKUP;
4143 break;
4144 case IMAGE_NCSI:
4145 img_optype = OPTYPE_NCSI_FW;
4146 break;
4147 case IMAGE_FLASHISM_JUMPVECTOR:
4148 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4149 break;
4150 case IMAGE_FIRMWARE_PHY:
4151 img_optype = OPTYPE_SH_PHY_FW;
4152 break;
4153 case IMAGE_REDBOOT_DIR:
4154 img_optype = OPTYPE_REDBOOT_DIR;
4155 break;
4156 case IMAGE_REDBOOT_CONFIG:
4157 img_optype = OPTYPE_REDBOOT_CONFIG;
4158 break;
4159 case IMAGE_UFI_DIR:
4160 img_optype = OPTYPE_UFI_DIR;
4161 break;
4162 default:
4163 break;
4164 }
4165
4166 return img_optype;
4167}
4168
/* Flash a UFI image on Skyhawk adapters. Iterates the flash section
 * entries, skipping up-to-date sections (CRC match) and flashing the
 * rest. Prefers the newer OFFSET-based flashing; if the FW currently on
 * the card rejects it (ILLEGAL_FIELD/ILLEGAL_REQUEST), falls back to the
 * legacy OPTYPE-based mechanism by restarting the whole loop via
 * retry_flash with flash_offset_support cleared.
 * Returns 0 on success or a negative errno.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		/* padded section size, as stored in the entry */
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* 0xFFFF in the raw entry marks an old-format FW image */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* be_get_img_optype() couldn't map this section: skip it */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Section already on flash matches the file: skip it */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Reject sections that would run past the end of the file */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4279
/* Download a FW image to a Lancer adapter: stream the file in 32KB chunks
 * through a DMA-coherent write-object command buffer, then commit with a
 * zero-length write. Afterwards, depending on change_status, either reset
 * the adapter to activate the new FW or tell the user a reboot is needed.
 * Returns 0 on success or a negative errno.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be 32-bit aligned */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One buffer holds the command header plus one chunk of image data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by however much the FW reports written */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* New FW can be activated now by resetting the adapter */
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		/* Any other non-"no reset" status needs a server reboot */
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4364
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004365#define BE2_UFI 2
4366#define BE3_UFI 3
4367#define BE3R_UFI 10
4368#define SH_UFI 4
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004369#define SH_P2_UFI 11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004370
Sathya Perlaca34fe32012-11-06 17:48:56 +00004371static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004372 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004373{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004374 if (!fhdr) {
4375 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4376 return -1;
4377 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004378
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004379 /* First letter of the build version is used to identify
4380 * which chip this image file is meant for.
4381 */
4382 switch (fhdr->build[0]) {
4383 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004384 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4385 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004386 case BLD_STR_UFI_TYPE_BE3:
4387 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4388 BE3_UFI;
4389 case BLD_STR_UFI_TYPE_BE2:
4390 return BE2_UFI;
4391 default:
4392 return -1;
4393 }
4394}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004395
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004396/* Check if the flash image file is compatible with the adapter that
4397 * is being flashed.
4398 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004399 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004400 */
4401static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4402 struct flash_file_hdr_g3 *fhdr)
4403{
4404 int ufi_type = be_get_ufi_type(adapter, fhdr);
4405
4406 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004407 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004408 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004409 case SH_UFI:
4410 return (skyhawk_chip(adapter) &&
4411 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004412 case BE3R_UFI:
4413 return BE3_chip(adapter);
4414 case BE3_UFI:
4415 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4416 case BE2_UFI:
4417 return BE2_chip(adapter);
4418 default:
4419 return false;
4420 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004421}
4422
/* Download a UFI image to a BE2/BE3/Skyhawk adapter (non-Lancer path).
 * Validates image/chip compatibility, allocates one DMA-coherent
 * write-flashrom command buffer, then dispatches each image header to the
 * chip-specific flashing routine. Returns 0 on success, a negative errno
 * for bad images/allocation failure, or the last flashing status.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	/* Refuse images built for a different chip family/revision */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		/* Image headers follow the file header back-to-back */
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On BE3/Skyhawk only imageid 1 is flashed */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
4466
4467int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4468{
4469 const struct firmware *fw;
4470 int status;
4471
4472 if (!netif_running(adapter->netdev)) {
4473 dev_err(&adapter->pdev->dev,
4474 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304475 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004476 }
4477
4478 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4479 if (status)
4480 goto fw_exit;
4481
4482 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4483
4484 if (lancer_chip(adapter))
4485 status = lancer_fw_download(adapter, fw);
4486 else
4487 status = be_fw_download(adapter, fw);
4488
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004489 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304490 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004491
Ajit Khaparde84517482009-09-04 03:12:16 +00004492fw_exit:
4493 release_firmware(fw);
4494 return status;
4495}
4496
/* ndo_bridge_setlink handler: set the HW switch forwarding mode (VEB or
 * VEPA) from an IFLA_AF_SPEC/IFLA_BRIDGE_MODE netlink attribute. Only
 * meaningful with SR-IOV enabled. Returns 0 on success or a negative
 * errno on bad attributes or a failed FW command.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	/* Act on the first IFLA_BRIDGE_MODE attribute and return from
	 * within the loop; remaining attributes are not examined.
	 */
	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Reject truncated attributes before reading the u16 */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	/* Reached on FW command failure or when no mode attr was found */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4543
4544static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304545 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004546{
4547 struct be_adapter *adapter = netdev_priv(dev);
4548 int status = 0;
4549 u8 hsw_mode;
4550
4551 if (!sriov_enabled(adapter))
4552 return 0;
4553
4554 /* BE and Lancer chips support VEB mode only */
4555 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4556 hsw_mode = PORT_FWD_TYPE_VEB;
4557 } else {
4558 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4559 adapter->if_handle, &hsw_mode);
4560 if (status)
4561 return 0;
4562 }
4563
4564 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4565 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004566 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4567 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004568}
4569
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304570#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004571/* VxLAN offload Notes:
4572 *
4573 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4574 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4575 * is expected to work across all types of IP tunnels once exported. Skyhawk
4576 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304577 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4578 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4579 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004580 *
4581 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4582 * adds more than one port, disable offloads and don't re-enable them again
4583 * until after all the tunnels are removed.
4584 */
/* VxLAN add-port callback: enable VxLAN offloads for the given UDP port.
 * Only one offloaded port is supported; adding a second port disables
 * offloads entirely (see the VxLAN offload notes above). No-op on
 * Lancer/BEx chips. vxlan_port_count tracks every added port, including
 * those that caused offloads to be disabled.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* A second port while offloads are active: turn offloads off but
	 * still count the port so deletes keep the count balanced.
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads stay disabled until all extra ports have been removed */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that a VxLAN port exists */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4633
4634static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4635 __be16 port)
4636{
4637 struct be_adapter *adapter = netdev_priv(netdev);
4638
4639 if (lancer_chip(adapter) || BEx_chip(adapter))
4640 return;
4641
4642 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004643 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304644
4645 be_disable_vxlan_offloads(adapter);
4646
4647 dev_info(&adapter->pdev->dev,
4648 "Disabled VxLAN offloads for UDP port %d\n",
4649 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004650done:
4651 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304652}
Joe Stringer725d5482014-11-13 16:38:13 -08004653
/* ndo_features_check handler: for encapsulated packets while VxLAN
 * offloads are active, strip checksum/GSO features unless the packet is
 * verifiably a VxLAN frame (UDP transport, Ethernet inner protocol, and
 * exactly a UDP+VxLAN header between transport and inner MAC headers).
 * See the VxLAN offload notes above be_add_vxlan_port().
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Neither IPv4 nor IPv6: leave features untouched */
		return features;
	}

	/* Not VxLAN-shaped: drop checksum and GSO offload features */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304694#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304695
/* net_device callbacks for the be2net driver; installed on every netdev
 * by be_netdev_init(). Optional groups (netpoll, busy-poll, VxLAN) are
 * compiled in only when the corresponding CONFIG options are set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
4726
4727static void be_netdev_init(struct net_device *netdev)
4728{
4729 struct be_adapter *adapter = netdev_priv(netdev);
4730
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004731 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004732 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004733 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004734 if (be_multi_rxq(adapter))
4735 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004736
4737 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004738 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004739
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004740 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004741 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004742
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004743 netdev->priv_flags |= IFF_UNICAST_FLT;
4744
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004745 netdev->flags |= IFF_MULTICAST;
4746
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004747 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004748
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004749 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004750
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004751 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004752}
4753
4754static void be_unmap_pci_bars(struct be_adapter *adapter)
4755{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004756 if (adapter->csr)
4757 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004758 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004759 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004760}
4761
/* Return the PCI BAR index holding the doorbell region: BAR 0 on Lancer
 * and on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4769
4770static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004771{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004772 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004773 adapter->roce_db.size = 4096;
4774 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4775 db_bar(adapter));
4776 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4777 db_bar(adapter));
4778 }
Parav Pandit045508a2012-03-26 14:27:13 +00004779 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004780}
4781
4782static int be_map_pci_bars(struct be_adapter *adapter)
4783{
4784 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004785
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004786 if (BEx_chip(adapter) && be_physfn(adapter)) {
4787 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304788 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004789 return -ENOMEM;
4790 }
4791
Sathya Perlace66f782012-11-06 17:48:58 +00004792 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304793 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004794 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004795 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004796
4797 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004798 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004799
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004800pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304801 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004802 be_unmap_pci_bars(adapter);
4803 return -ENOMEM;
4804}
4805
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004806static void be_ctrl_cleanup(struct be_adapter *adapter)
4807{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004808 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004809
4810 be_unmap_pci_bars(adapter);
4811
4812 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004813 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4814 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004815
Sathya Perla5b8821b2011-08-02 19:57:44 +00004816 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004817 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004818 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4819 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004820}
4821
/* One-time control-path setup: read SLI identity from config space, map
 * the PCI BARs, allocate the FW mailbox (16-byte aligned) and rx-filter
 * DMA buffers, and initialize the mailbox/MCC locks. On failure the
 * goto chain unwinds exactly what was set up. Returns 0 or -errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode ASIC family and PF/VF identity from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to the 16-byte boundary the FW requires.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Persistent, zeroed buffer used by the RX_FILTER FW command */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved config-space state is restored on resume/EEH recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4880
4881static void be_stats_cleanup(struct be_adapter *adapter)
4882{
Sathya Perla3abcded2010-10-03 22:12:27 -07004883 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004884
4885 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004886 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4887 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004888}
4889
4890static int be_stats_init(struct be_adapter *adapter)
4891{
Sathya Perla3abcded2010-10-03 22:12:27 -07004892 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004893
Sathya Perlaca34fe32012-11-06 17:48:56 +00004894 if (lancer_chip(adapter))
4895 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4896 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004897 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004898 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004899 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004900 else
4901 /* ALL non-BE ASICs */
4902 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004903
Joe Perchesede23fa2013-08-26 22:45:23 -07004904 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4905 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304906 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304907 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004908 return 0;
4909}
4910
/* PCI remove handler: tear down the adapter in the reverse order of
 * be_probe(). The ordering matters — RoCE and the recovery worker must
 * be quiesced before the netdev is unregistered, and the FW must be told
 * we are done before control-path resources are freed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4941
Sathya Perla39f1d942012-05-08 19:41:24 +00004942static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004943{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304944 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004945
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004946 status = be_cmd_get_cntl_attributes(adapter);
4947 if (status)
4948 return status;
4949
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004950 /* Must be a power of 2 or else MODULO will BUG_ON */
4951 adapter->be_get_temp_freq = 64;
4952
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304953 if (BEx_chip(adapter)) {
4954 level = be_cmd_get_fw_log_level(adapter);
4955 adapter->msg_enable =
4956 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4957 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004958
Sathya Perla92bf14a2013-08-27 16:57:32 +05304959 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004960 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004961}
4962
/* Attempt to recover a Lancer function after an error: wait for the chip
 * to report ready, tear the function down, clear error state, and bring
 * it back up (reopening the interface if it was running). Returns 0 on
 * success; -EAGAIN means FW resource provisioning is still in progress
 * and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4999
/* Periodic (1s) error-detection/recovery worker. On a Lancer HW error it
 * detaches the netdev, runs lancer_recover_func() and re-attaches on
 * success. The work re-arms itself unless recovery failed with a
 * non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* rtnl protects the detach against concurrent netdev ops */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
5025
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes FW statistics, polls die temperature on
 * the PF, replenishes starved RX queues and updates EQ delay. Always
 * re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature is polled every be_get_temp_freq ticks, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5068
Sathya Perla257a3fe2013-06-14 15:54:51 +05305069/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00005070static bool be_reset_required(struct be_adapter *adapter)
5071{
Sathya Perla257a3fe2013-06-14 15:54:51 +05305072 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00005073}
5074
Sathya Perlad3791422012-09-28 04:39:44 +00005075static char *mc_name(struct be_adapter *adapter)
5076{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305077 char *str = ""; /* default */
5078
5079 switch (adapter->mc_type) {
5080 case UMC:
5081 str = "UMC";
5082 break;
5083 case FLEX10:
5084 str = "FLEX10";
5085 break;
5086 case vNIC1:
5087 str = "vNIC-1";
5088 break;
5089 case nPAR:
5090 str = "nPAR";
5091 break;
5092 case UFP:
5093 str = "UFP";
5094 break;
5095 case vNIC2:
5096 str = "vNIC-2";
5097 break;
5098 default:
5099 str = "";
5100 }
5101
5102 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005103}
5104
/* "PF" or "VF", for log messages. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5109
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005110static inline char *nic_name(struct pci_dev *pdev)
5111{
5112 switch (pdev->device) {
5113 case OC_DEVICE_ID1:
5114 return OC_NAME;
5115 case OC_DEVICE_ID2:
5116 return OC_NAME_BE;
5117 case OC_DEVICE_ID3:
5118 case OC_DEVICE_ID4:
5119 return OC_NAME_LANCER;
5120 case BE_DEVICE_ID2:
5121 return BE3_NAME;
5122 case OC_DEVICE_ID5:
5123 case OC_DEVICE_ID6:
5124 return OC_NAME_SH;
5125 default:
5126 return BE_NAME;
5127 }
5128}
5129
/* PCI probe handler: enable the device, allocate the netdev/adapter,
 * set up DMA masks, initialize the control path, sync with FW, configure
 * the function and register the netdev. The goto labels unwind each
 * stage in reverse order on failure.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to enabled in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5252
/* Legacy PM suspend handler: arm wake-on-LAN if enabled, quiesce the
 * recovery worker and the interface, tear the function down, then put
 * the device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5277
/* Legacy PM resume handler: re-enable the device, restore saved PCI
 * state, wait for FW readiness, reset and re-initialize the function,
 * reopen the interface if it was running and restart the recovery
 * worker. Mirrors be_suspend() in reverse.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	/* Disarm wake-on-LAN now that we are fully up again */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5323
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown handler: quiesce workers and RoCE, detach the netdev and
 * reset the function so no DMA is in flight across reboot/kexec.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5344
/* EEH (PCI error) detection callback: on the first report, quiesce the
 * interface and tear down the function; then tell the EEH core whether
 * to give up (permanent failure) or attempt a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5383
5384static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5385{
5386 struct be_adapter *adapter = pci_get_drvdata(pdev);
5387 int status;
5388
5389 dev_info(&adapter->pdev->dev, "EEH reset\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005390
5391 status = pci_enable_device(pdev);
5392 if (status)
5393 return PCI_ERS_RESULT_DISCONNECT;
5394
5395 pci_set_master(pdev);
Yijing Wang1ca01512013-06-27 20:53:42 +08005396 pci_set_power_state(pdev, PCI_D0);
Sathya Perlacf588472010-02-14 21:22:01 +00005397 pci_restore_state(pdev);
5398
5399 /* Check if card is ok and fw is ready */
Sathya Perlac5b3ad42013-03-05 22:23:20 +00005400 dev_info(&adapter->pdev->dev,
5401 "Waiting for FW to be ready after EEH reset\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005402 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005403 if (status)
5404 return PCI_ERS_RESULT_DISCONNECT;
5405
Sathya Perlad6b6d982012-09-05 01:56:48 +00005406 pci_cleanup_aer_uncorrect_error_status(pdev);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005407 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005408 return PCI_ERS_RESULT_RECOVERED;
5409}
5410
5411static void be_eeh_resume(struct pci_dev *pdev)
5412{
5413 int status = 0;
5414 struct be_adapter *adapter = pci_get_drvdata(pdev);
5415 struct net_device *netdev = adapter->netdev;
5416
5417 dev_info(&adapter->pdev->dev, "EEH resume\n");
5418
5419 pci_save_state(pdev);
5420
Kalesh AP2d177be2013-04-28 22:22:29 +00005421 status = be_cmd_reset_function(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005422 if (status)
5423 goto err;
5424
Kalesh AP03a58ba2014-05-13 14:03:11 +05305425 /* On some BE3 FW versions, after a HW reset,
5426 * interrupts will remain disabled for each function.
5427 * So, explicitly enable interrupts
5428 */
5429 be_intr_set(adapter, true);
5430
Kalesh AP2d177be2013-04-28 22:22:29 +00005431 /* tell fw we're ready to fire cmds */
5432 status = be_cmd_fw_init(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005433 if (status)
5434 goto err;
5435
Sathya Perlacf588472010-02-14 21:22:01 +00005436 status = be_setup(adapter);
5437 if (status)
5438 goto err;
5439
5440 if (netif_running(netdev)) {
5441 status = be_open(netdev);
5442 if (status)
5443 goto err;
5444 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005445
5446 schedule_delayed_work(&adapter->func_recovery_work,
5447 msecs_to_jiffies(1000));
Sathya Perlacf588472010-02-14 21:22:01 +00005448 netif_device_attach(netdev);
5449 return;
5450err:
5451 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005452}
5453
/* EEH/AER error-recovery callbacks registered with the PCI core via
 * be_driver.err_handler below.
 */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5459
/* PCI driver descriptor: probe/remove, legacy suspend/resume PM hooks,
 * shutdown handler and the EEH error handlers above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5470
5471static int __init be_init_module(void)
5472{
Joe Perches8e95a202009-12-03 07:58:21 +00005473 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5474 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005475 printk(KERN_WARNING DRV_NAME
5476 " : Module param rx_frag_size must be 2048/4096/8192."
5477 " Using 2048\n");
5478 rx_frag_size = 2048;
5479 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005480
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005481 return pci_register_driver(&be_driver);
5482}
5483module_init(be_init_module);
5484
/* Module exit point: unregister the PCI driver; the PCI core then calls
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);