blob: 2b9e1be1568d8665ba64e173c602cc7c7b78a504 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one human-readable name per bit, used when
 * decoding and logging an Unrecoverable Error reported by the chip.
 * Trailing spaces in some entries are preserved as the firmware
 * documentation spells them.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR: per-bit block names for the upper UE status
 * register, companion table to ue_status_low_desc above.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000271 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000283 }
284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000290 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000291 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700292
Sathya Perla5a712c12013-07-23 15:24:59 +0530293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
dingtianhong61d23e92013-12-30 15:40:43 +0800296 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 status = -EPERM;
298 goto err;
299 }
300
Somnath Koture3a7ae22011-10-27 07:14:05 +0000301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530302 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 return 0;
304err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306 return status;
307}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
Ajit Khaparde61000862013-10-03 16:16:33 -0500440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530484 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500492}
493
Selvin Xavier005d5692011-05-16 07:36:35 +0000494static void populate_lancer_stats(struct be_adapter *adapter)
495{
Selvin Xavier005d5692011-05-16 07:36:35 +0000496 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000527 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000528 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000556void be_parse_stats(struct be_adapter *adapter)
557{
Ajit Khaparde61000862013-10-03 16:16:33 -0500558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559 struct be_rx_obj *rxo;
560 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000561 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000562
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000565 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else if (BE3_chip(adapter))
569 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 else
572 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573
Ajit Khaparde61000862013-10-03 16:16:33 -0500574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000578 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000579 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000580}
581
Sathya Perlaab1594e2011-07-25 19:10:15 +0000582static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530583 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700584{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000585 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000586 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700587 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000588 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000589 u64 pkts, bytes;
590 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700591 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592
Sathya Perla3abcded2010-10-03 22:12:27 -0700593 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000594 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530595
Sathya Perlaab1594e2011-07-25 19:10:15 +0000596 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700597 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000598 pkts = rx_stats(rxo)->rx_pkts;
599 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700600 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000601 stats->rx_packets += pkts;
602 stats->rx_bytes += bytes;
603 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
604 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
605 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700606 }
607
Sathya Perla3c8def92011-06-12 20:01:58 +0000608 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000609 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530610
Sathya Perlaab1594e2011-07-25 19:10:15 +0000611 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700612 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000613 pkts = tx_stats(txo)->tx_pkts;
614 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700615 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000616 stats->tx_packets += pkts;
617 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000618 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700619
620 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000621 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000622 drvs->rx_alignment_symbol_errors +
623 drvs->rx_in_range_errors +
624 drvs->rx_out_range_errors +
625 drvs->rx_frame_too_long +
626 drvs->rx_dropped_too_small +
627 drvs->rx_dropped_too_short +
628 drvs->rx_dropped_header_too_small +
629 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000630 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700631
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000633 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000636
Sathya Perlaab1594e2011-07-25 19:10:15 +0000637 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700638
639 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000640 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000641
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642 /* receiver fifo overrun */
643 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000644 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000645 drvs->rx_input_fifo_overflow_drop +
646 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000647 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666{
Sathya Perla3c8def92011-06-12 20:01:58 +0000667 struct be_tx_stats *stats = tx_stats(txo);
668
Sathya Perlaab1594e2011-07-25 19:10:15 +0000669 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000670 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671 stats->tx_bytes += skb->len;
672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500676/* Returns number of WRBs needed for the skb */
677static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500679 /* +1 for the header wrb */
680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681}
682
683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
684{
685 wrb->frag_pa_hi = upper_32_bits(addr);
686 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
687 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000688 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689}
690
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000691static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530692 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000693{
694 u8 vlan_prio;
695 u16 vlan_tag;
696
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100697 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000698 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
699 /* If vlan priority provided by OS is NOT in available bmap */
700 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
701 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
702 adapter->recommended_prio;
703
704 return vlan_tag;
705}
706
Sathya Perlac9c47142014-03-27 10:46:19 +0530707/* Used only for IP tunnel packets */
708static u16 skb_inner_ip_proto(struct sk_buff *skb)
709{
710 return (inner_ip_hdr(skb)->version == 4) ?
711 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
712}
713
714static u16 skb_ip_proto(struct sk_buff *skb)
715{
716 return (ip_hdr(skb)->version == 4) ?
717 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
718}
719
/* Fill the TX header WRB for @skb.
 * Programs (in this order): CRC offload, LSO/LSO6 (for GSO skbs) or
 * TCP/UDP checksum offload (for CHECKSUM_PARTIAL skbs, using the inner
 * header for encapsulated pkts), HW VLAN insertion, the total WRB count
 * and frame length, and optionally the event bit for the skip-HW-VLAN
 * firmware workaround. @hdr is zeroed first; caller converts to LE.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to HW; lso6 for IPv6 except on Lancer */
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunneled pkt: csum the inner headers */
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
763
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000764static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530765 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000766{
767 dma_addr_t dma;
768
769 be_dws_le_to_cpu(wrb, sizeof(*wrb));
770
771 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000772 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000773 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774 dma_unmap_single(dev, dma, wrb->frag_len,
775 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000776 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000777 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000778 }
779}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780
/* Returns the number of WRBs used up by the skb */
/* Enqueue @skb on @txo: writes the header WRB, DMA-maps and writes one
 * WRB for the linear data (if any) and one per page frag, then records
 * the skb for completion handling and updates queue accounting/stats.
 * On a DMA mapping failure every mapping made so far is undone, the
 * queue head is restored, and 0 is returned (caller frees the skb).
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;	/* saved for rollback and skb bookkeeping */

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	/* map the linear part, if the skb has one */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first unmap must be dma_unmap_single */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* map each page fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* remember the skb at the slot of its header WRB for completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB was single-mapped */
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}
856
/* Non-zero iff the QnQ async event has been received from FW
 * (BE_FLAGS_QNQ_ASYNC_EVT_RCVD set); callers use this to decide whether
 * the skip-HW-VLAN-tagging workaround is available.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
861
/* Insert VLAN tag(s) into the packet data in SW (instead of HW tagging).
 * Inserts the inner tag (from the skb's TCI, or the pvid in QnQ mode)
 * and then the outer QnQ tag, if configured. May set *skip_hw_vlan to
 * tell the caller to request the FW skip-HW-VLAN workaround.
 * Returns the (possibly reallocated) skb, or NULL on allocation failure
 * — in which case the helpers have already consumed/freed the skb.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* we will modify packet data; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the pkt data; clear HW-tagging TCI */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
906
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000907static bool be_ipv6_exthdr_check(struct sk_buff *skb)
908{
909 struct ethhdr *eh = (struct ethhdr *)skb->data;
910 u16 offset = ETH_HLEN;
911
912 if (eh->h_proto == htons(ETH_P_IPV6)) {
913 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
914
915 offset += sizeof(struct ipv6hdr);
916 if (ip6h->nexthdr != NEXTHDR_TCP &&
917 ip6h->nexthdr != NEXTHDR_UDP) {
918 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530919 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000920
921 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
922 if (ehdr->hdrlen == 0xff)
923 return true;
924 }
925 }
926 return false;
927}
928
929static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
930{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100931 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000932}
933
/* True if this skb could trigger the TX-stall HW issue: only BE3 chips
 * are affected, and only for the offending ipv6 ext-hdr packet shape.
 */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	if (!BE3_chip(adapter))
		return 0;

	return be_ipv6_exthdr_check(skb);
}
938
/* Apply BEx/Lancer TX workarounds to @skb before enqueueing:
 * 1) trim padded short IPv4 pkts (HW/Lancer mangle tot_len/csum on them),
 * 2) skip HW VLAN tagging when a tag is already inlined in pvid mode,
 * 3) insert the VLAN in SW for non-csum-offloaded pkts (HW csum bug),
 * 4) drop or SW-tag certain ipv6 pkts that can lock up the ASIC when
 *    HW-tagged.
 * Returns the (possibly modified/reallocated) skb, or NULL if the pkt
 * was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the pad bytes: keep only headers + IP payload */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;	/* skb already freed by helper */
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1006
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301007static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1008 struct sk_buff *skb,
1009 bool *skip_hw_vlan)
1010{
1011 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1012 * less may cause a transmit stall on that port. So the work-around is
1013 * to pad short packets (<= 32 bytes) to a 36-byte length.
1014 */
1015 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001016 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301017 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301018 }
1019
1020 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1021 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1022 if (!skb)
1023 return NULL;
1024 }
1025
1026 return skb;
1027}
1028
/* Notify HW of all pending WRBs on @txo by ringing the TX doorbell.
 * Ensures the last request will generate a completion event, and (on
 * non-Lancer chips) pads an odd pending-WRB count with a dummy WRB,
 * fixing up the num_wrb field of the last header WRB to match.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* the last request now covers one extra (dummy) WRB */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1052
/* ndo_start_xmit handler: apply workarounds, enqueue the skb's WRBs,
 * stop the subqueue when it is close to full, and ring the doorbell
 * unless the stack indicated more pkts are coming (xmit_more).
 * Always returns NETDEV_TX_OK; dropped skbs are freed here or by the
 * workaround helpers.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;	/* skb already freed by the workaround path */

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		/* enqueue failed (DMA mapping error); free the skb here */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the queue if it can't fit another max-fragmented skb */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1089
1090static int be_change_mtu(struct net_device *netdev, int new_mtu)
1091{
1092 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301093 struct device *dev = &adapter->pdev->dev;
1094
1095 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1096 dev_info(dev, "MTU must be between %d and %d bytes\n",
1097 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098 return -EINVAL;
1099 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301100
1101 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301102 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001103 netdev->mtu = new_mtu;
1104 return 0;
1105}
1106
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * Programs the currently-configured vid set (adapter->vids) into the HW
 * VLAN filter. Falls back to VLAN-promiscuous mode when too many vids
 * are configured or FW reports insufficient resources; re-disables
 * VLAN-promiscuous mode once filtering succeeds again.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1163
Patrick McHardy80d5c362013-04-19 02:04:28 +00001164static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001165{
1166 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001167 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001169 /* Packets with VID 0 are always received by Lancer by default */
1170 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301171 return status;
1172
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301173 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301174 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001175
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301176 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301177 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001178
Somnath Kotura6b74e02014-01-21 15:50:55 +05301179 status = be_vid_config(adapter);
1180 if (status) {
1181 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301182 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301183 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301184
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001185 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001186}
1187
Patrick McHardy80d5c362013-04-19 02:04:28 +00001188static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189{
1190 struct be_adapter *adapter = netdev_priv(netdev);
1191
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001192 /* Packets with VID 0 are always received by Lancer by default */
1193 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301194 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001195
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301196 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301197 adapter->vlans_added--;
1198
1199 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200}
1201
/* Exit promiscuous RX mode: clears the SW promisc flag and the
 * VLAN/MCAST promisc flags (so subsequent be_vid_config()/
 * be_set_rx_mode() calls re-program filtering), then tells FW to turn
 * promiscuous filtering off.
 */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
1209
/* ndo_set_rx_mode handler: sync the HW RX filter with the netdev flags
 * and UC/MC address lists. Order of business:
 * 1) IFF_PROMISC on -> enable HW promisc and return.
 * 2) leaving promisc  -> clear it and re-program the VLAN filter.
 * 3) too many MC addrs or IFF_ALLMULTI -> MCAST-promisc fallback.
 * 4) UC list changed  -> delete and re-add secondary MACs (or fall back
 *    to full promisc when the list exceeds HW capacity).
 * 5) program the MC list; on failure fall back to MCAST promisc.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all previously-programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC addrs for HW filtering: go fully promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed OK: MCAST promisc no longer needed */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1276
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001277static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1278{
1279 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001280 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001281 int status;
1282
Sathya Perla11ac75e2011-12-13 00:58:50 +00001283 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001284 return -EPERM;
1285
Sathya Perla11ac75e2011-12-13 00:58:50 +00001286 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001287 return -EINVAL;
1288
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301289 /* Proceed further only if user provided MAC is different
1290 * from active MAC
1291 */
1292 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1293 return 0;
1294
Sathya Perla3175d8c2013-07-23 15:25:03 +05301295 if (BEx_chip(adapter)) {
1296 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1297 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001298
Sathya Perla11ac75e2011-12-13 00:58:50 +00001299 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1300 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301301 } else {
1302 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1303 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001304 }
1305
Kalesh APabccf232014-07-17 16:20:24 +05301306 if (status) {
1307 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1308 mac, vf, status);
1309 return be_cmd_status(status);
1310 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001311
Kalesh APabccf232014-07-17 16:20:24 +05301312 ether_addr_copy(vf_cfg->mac_addr, mac);
1313
1314 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001315}
1316
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001317static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301318 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319{
1320 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001321 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001322
Sathya Perla11ac75e2011-12-13 00:58:50 +00001323 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001324 return -EPERM;
1325
Sathya Perla11ac75e2011-12-13 00:58:50 +00001326 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001327 return -EINVAL;
1328
1329 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001330 vi->max_tx_rate = vf_cfg->tx_rate;
1331 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001332 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1333 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001334 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301335 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001336
1337 return 0;
1338}
1339
Sathya Perla748b5392014-05-09 13:29:13 +05301340static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001341{
1342 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001343 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001344 int status = 0;
1345
Sathya Perla11ac75e2011-12-13 00:58:50 +00001346 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001347 return -EPERM;
1348
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001349 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001350 return -EINVAL;
1351
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001352 if (vlan || qos) {
1353 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301354 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001355 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1356 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001357 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001358 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301359 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1360 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001361 }
1362
Kalesh APabccf232014-07-17 16:20:24 +05301363 if (status) {
1364 dev_err(&adapter->pdev->dev,
1365 "VLAN %d config on VF %d failed : %#x\n", vlan,
1366 vf, status);
1367 return be_cmd_status(status);
1368 }
1369
1370 vf_cfg->vlan_tag = vlan;
1371
1372 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001373}
1374
/* ndo_set_vf_rate handler: apply a max TX-rate cap to VF @vf.
 * min_tx_rate is not supported.  max_tx_rate == 0 clears the cap (the link
 * check and range validation are skipped in that case).  A non-zero rate
 * must lie between 100 Mbps and the current link speed, and on Skyhawk must
 * be an exact multiple of 1% of the link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Minimum rate limiting is not supported by the HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 means "no limit": skip validation, just program it */
	if (!max_tx_rate)
		goto config_qos;

	/* The cap is validated against the live link speed, so the rate
	 * can only be set while the link is up.
	 */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the accepted rate for be_get_vf_config() */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	/* Note: a more specific dev_err may already have been printed above;
	 * this one records the overall failure with the requested rate.
	 */
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301436
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301437static int be_set_vf_link_state(struct net_device *netdev, int vf,
1438 int link_state)
1439{
1440 struct be_adapter *adapter = netdev_priv(netdev);
1441 int status;
1442
1443 if (!sriov_enabled(adapter))
1444 return -EPERM;
1445
1446 if (vf >= adapter->num_vfs)
1447 return -EINVAL;
1448
1449 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301450 if (status) {
1451 dev_err(&adapter->pdev->dev,
1452 "Link state change on VF %d failed: %#x\n", vf, status);
1453 return be_cmd_status(status);
1454 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301455
Kalesh APabccf232014-07-17 16:20:24 +05301456 adapter->vf_cfg[vf].plink_tracking = link_state;
1457
1458 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301459}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001460
Sathya Perla2632baf2013-10-01 16:00:00 +05301461static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1462 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001463{
Sathya Perla2632baf2013-10-01 16:00:00 +05301464 aic->rx_pkts_prev = rx_pkts;
1465 aic->tx_reqs_prev = tx_pkts;
1466 aic->jiffies = now;
1467}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001468
/* Adaptive interrupt coalescing: for every event queue, derive a new EQ
 * delay from the rx+tx packet rate since the last sample and push all
 * changed delays to the firmware in one command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: force the user-configured static
			 * delay and reset the sampling baseline.
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read per-queue counters under the u64_stats retry loop so
		 * 64-bit values are consistent on 32-bit hosts.
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined packets-per-second since the previous sample */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		/* Heuristic pps -> delay mapping, clamped to [min, max] eqd;
		 * very low rates (eqd < 8) disable coalescing entirely.
		 */
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a firmware update only when the delay changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all per-EQ updates into a single mailbox command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1534
Sathya Perla3abcded2010-10-03 22:12:27 -07001535static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301536 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001537{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001538 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001539
Sathya Perlaab1594e2011-07-25 19:10:15 +00001540 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001541 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001542 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001543 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001544 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001545 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001546 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001547 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001548 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549}
1550
Sathya Perla2e588f82011-03-11 02:49:26 +00001551static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001552{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001553 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301554 * Also ignore ipcksm for ipv6 pkts
1555 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001556 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301557 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001558}
1559
/* Pop the next RX buffer fragment from the tail of @rxo's queue and make
 * its page data visible to the CPU.  The page backing a fragment is mapped
 * once per big page: only the slot marked last_frag owns the DMA mapping
 * and gets a full unmap; every other slot is just cache-synced.
 * Caller takes ownership of the returned page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted slot must always carry a page */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment of the big page: tear down the mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Mapping stays live; just sync this fragment for the CPU */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the slot */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1585
1586/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001587static void be_rx_compl_discard(struct be_rx_obj *rxo,
1588 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001590 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001591 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001593 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301594 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001595 put_page(page_info->page);
1596 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597 }
1598}
1599
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied entirely into the skb's linear area
 * (tiny packets) or split: the ethernet header goes into the linear area
 * and the payload stays in the page as frag[0].  Remaining fragments are
 * attached as page frags, coalescing consecutive fragments that live in
 * the same physical page into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area; the
		 * rest of the first fragment becomes frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped above) */
	page_info->page = NULL;

	/* Single-fragment packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag[j]: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1674
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, attaches the received fragments, fills in checksum /
 * hash / vlan metadata and hands the packet to the stack.  On skb
 * allocation failure the completion's buffers are discarded and a drop
 * counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* Out of memory: count the drop and free the HW buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the netdev offload is enabled and
	 * the completion flags say it verified cleanly.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the verified checksum is the inner one */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1710
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds the packet as pure page frags on the napi GRO skb (no linear
 * copy), coalescing same-page fragments, then feeds it to the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No GRO skb available: drop the completion's buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration opens frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frag[j]: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for HW-verified TCP packets */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the verified checksum is the inner one */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1768
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001769static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1770 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001771{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301772 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1773 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1774 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1775 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1776 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1777 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1778 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1779 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1780 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1781 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1782 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001783 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301784 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1785 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001786 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301787 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301788 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301789 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001791
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001792static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1793 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001794{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301795 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1796 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1797 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1798 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1799 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1800 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1801 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1802 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1803 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1804 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1805 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001806 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301807 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1808 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001809 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301810 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1811 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001812}
1813
/* Fetch the next RX completion from @rxo's completion queue, or NULL when
 * none is pending.  Parses the HW descriptor (v0 or v1 format) into
 * rxo->rxcp, applies VLAN fix-ups, and consumes the CQ entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the compl */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag when it is the port-vlan and the host never
		 * configured that vid itself.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1858
Eric Dumazet1829b082011-03-01 05:48:12 +00001859static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001862
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001864 gfp |= __GFP_COMP;
1865 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866}
1867
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 *
 * At most @frags_needed fragments are posted; the loop also stops early if
 * the next rxq slot is still occupied (page_info->page != NULL), if page
 * allocation fails, or if DMA mapping fails.  Each big page is DMA-mapped
 * once and shared by several fragments via get_page() refcounts; the frag
 * that owns the mapping (last_frag) records the page DMA address for the
 * eventual unmap.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for RX DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next fragment out of the current page;
			 * each fragment holds its own page reference
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 buffers at a time */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1950
Sathya Perla5fb379e2009-06-18 00:02:59 +00001951static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001953 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1954
1955 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1956 return NULL;
1957
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001958 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1960
1961 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1962
1963 queue_tail_inc(tx_cq);
1964 return txcp;
1965}
1966
/* Reclaim the wrbs of one completed TX request and free its skb(s).
 *
 * Walks the TX queue from the current tail up to and including
 * @last_index (the wrb index reported in the TX completion), unmapping
 * each fragment's DMA buffer.  A non-NULL sent_skbs[] slot marks the
 * header wrb of a request; the previous request's skb (if any) is freed
 * when the next header is reached, and the final skb is freed after the
 * loop.  Returns the number of wrbs reclaimed so the caller can adjust
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Unmap the header only when the skb has linear data */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last (or only) request processed */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2000
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002001/* Return the number of events in the event queue */
2002static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002003{
2004 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002005 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002006
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002007 do {
2008 eqe = queue_tail_node(&eqo->q);
2009 if (eqe->evt == 0)
2010 break;
2011
2012 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002013 eqe->evt = 0;
2014 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002015 queue_tail_inc(&eqo->q);
2016 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002017
2018 return num;
2019}
2020
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002021/* Leaves the EQ is disarmed state */
2022static void be_eq_clean(struct be_eq_obj *eqo)
2023{
2024 int num = events_get(eqo);
2025
2026 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2027}
2028
/* Drain rxo's completion queue and release every posted-but-unused RX
 * buffer, then reset the RXQ indices.  Used during queue teardown.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms without a flush compl, or if
			 * the HW is already in an error state
			 */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2078
/* Reclaim all outstanding TX wrbs across every TX queue during teardown.
 *
 * Phase 1 polls each TX CQ for completions until either every queue has
 * drained down to its pend_wrb_cnt, the HW stays silent for ~10ms, or a
 * HW error is detected.  Phase 2 then frees any requests that were
 * enqueued but never notified to the HW, and rewinds the TXQ head/tail
 * to the last notified position.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart silence timer */
				timeo = 0;
			}
			/* Only un-notified wrbs remain on this queue */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2143
/* Tear down all event queues: drain each created EQ, destroy it on the
 * adapter, unregister its NAPI context, and free the queue memory.
 * be_queue_free() is called unconditionally so partially-created EQs
 * (alloc'd but not created) are also released.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2159
/* Create the event queues and their NAPI contexts.
 *
 * The EQ count is capped by both the number of available IRQs and the
 * configured queue count.  Returns 0 on success or a negative error
 * code; on failure, the caller is expected to clean up via
 * be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Enable adaptive interrupt coalescing by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2192
Sathya Perla5fb379e2009-06-18 00:02:59 +00002193static void be_mcc_queues_destroy(struct be_adapter *adapter)
2194{
2195 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002196
Sathya Perla8788fdc2009-07-27 22:52:03 +00002197 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002198 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002199 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002200 be_queue_free(adapter, q);
2201
Sathya Perla8788fdc2009-07-27 22:52:03 +00002202 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002203 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002204 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002205 be_queue_free(adapter, q);
2206}
2207
/* Must be called only after TX qs are created as MCC shares TX EQ.
 *
 * Creates the MCC completion queue first (it is bound to the default
 * EQ), then the MCC queue itself.  Uses goto-chained cleanup so that a
 * failure at any step unwinds exactly what was set up before it.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2240
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241static void be_tx_queues_destroy(struct be_adapter *adapter)
2242{
2243 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002244 struct be_tx_obj *txo;
2245 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002246
Sathya Perla3c8def92011-06-12 20:01:58 +00002247 for_all_tx_queues(adapter, txo, i) {
2248 q = &txo->q;
2249 if (q->created)
2250 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2251 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252
Sathya Perla3c8def92011-06-12 20:01:58 +00002253 q = &txo->cq;
2254 if (q->created)
2255 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2256 be_queue_free(adapter, q);
2257 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258}
2259
/* Create the TX queues and their completion queues.
 *
 * The TX queue count is capped by the EQ count and the device maximum.
 * Returns 0 on success or the first error code; partial creations are
 * left for the caller's teardown path to release.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2300
2301static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302{
2303 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002304 struct be_rx_obj *rxo;
2305 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002306
Sathya Perla3abcded2010-10-03 22:12:27 -07002307 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002308 q = &rxo->cq;
2309 if (q->created)
2310 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2311 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002312 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002313}
2314
/* Create the RX completion queues, one per RX ring.
 *
 * The ring count matches the EQ count; when more than one ring is
 * available an extra default RXQ is added for non-RSS traffic.
 * Returns 0 on success or the first error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* Size of the page pool each RX buffer is carved from */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs are distributed round-robin over the available EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2351
/* Legacy INTx interrupt handler (one shared EQ). */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2383
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002384static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002385{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002386 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002387
Sathya Perla0b545a62012-11-23 00:27:18 +00002388 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2389 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002390 return IRQ_HANDLED;
2391}
2392
Sathya Perla2e588f82011-03-11 02:49:26 +00002393static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002394{
Somnath Koture38b1702013-05-29 22:55:56 +00002395 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002396}
2397
/* NAPI RX poll worker for one RX ring.
 *
 * Processes up to @budget completions, delivering packets via GRO or the
 * regular path, and re-posts RX buffers when the ring runs low.
 * @polling distinguishes NAPI polling from busy-polling (GRO is skipped
 * for the latter).  Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2457
Kalesh AP512bb8a2014-09-02 09:56:49 +05302458static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2459{
2460 switch (status) {
2461 case BE_TX_COMP_HDR_PARSE_ERR:
2462 tx_stats(txo)->tx_hdr_parse_err++;
2463 break;
2464 case BE_TX_COMP_NDMA_ERR:
2465 tx_stats(txo)->tx_dma_err++;
2466 break;
2467 case BE_TX_COMP_ACL_ERR:
2468 tx_stats(txo)->tx_spoof_check_err++;
2469 break;
2470 }
2471}
2472
2473static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2474{
2475 switch (status) {
2476 case LANCER_TX_COMP_LSO_ERR:
2477 tx_stats(txo)->tx_tso_err++;
2478 break;
2479 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2480 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2481 tx_stats(txo)->tx_spoof_check_err++;
2482 break;
2483 case LANCER_TX_COMP_QINQ_ERR:
2484 tx_stats(txo)->tx_qinq_err++;
2485 break;
2486 case LANCER_TX_COMP_PARITY_ERR:
2487 tx_stats(txo)->tx_internal_parity_err++;
2488 break;
2489 case LANCER_TX_COMP_DMA_ERR:
2490 tx_stats(txo)->tx_dma_err++;
2491 break;
2492 }
2493}
2494
/* Reap TX completions for one TX queue (@idx is its netdev subqueue).
 *
 * Frees completed skbs, records per-status error stats, acknowledges
 * the CQ, and wakes the netdev subqueue if it had been stopped and the
 * queue has drained below half capacity.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* Non-zero status indicates a TX error; account it in the
		 * chip-family-specific stats
		 */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002533
#ifdef CONFIG_NET_RX_BUSY_POLL
/* The helpers below arbitrate ownership of an EQ between NAPI and
 * busy-poll processing, using eqo->lock and the BE_EQ_* state bits.
 * When CONFIG_NET_RX_BUSY_POLL is off, the stubs at the bottom make
 * NAPI always win and busy-poll always yield.
 */

/* Try to claim the EQ for NAPI; returns false if busy-poll holds it,
 * recording the contention via BE_EQ_NAPI_YIELD.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release a NAPI claim taken by be_lock_napi() and reset state to idle. */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-poll; returns false if NAPI holds it,
 * recording the contention via BE_EQ_POLL_YIELD.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release a busy-poll claim taken by be_lock_busy_poll(). */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ arbitration lock/state (called at enable time). */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Quiesce busy-poll on this EQ by taking (and keeping) the NAPI claim,
 * spinning until any in-flight busy-poll finishes.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queueus.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll, NAPI always gets the EQ. */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

/* Without busy-poll, busy-poll never gets the EQ. */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2633
/* NAPI poll handler for one EQ.
 * Reaps TX completions on all TX queues bound to this EQ, then (if the
 * EQ is not held by busy-poll) processes RX on its RX queues, services
 * the MCC queue when this is the MCC EQ, and finally either completes
 * NAPI and re-arms the EQ (work < budget) or leaves interrupts off and
 * just acks the events so polling continues.
 * Returns the max RX work done across this EQ's RX queues (or the full
 * budget if busy-poll owned the EQ, to force a re-poll).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns this EQ; claim the full budget so the
		 * kernel schedules us again.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2673
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll handler for one EQ.
 * Returns LL_FLUSH_BUSY if NAPI currently owns the EQ; otherwise polls
 * each RX queue on the EQ (small fixed budget of 4) until one of them
 * yields packets, and returns the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2695
/* Check the adapter for fatal hardware/firmware errors and log them.
 * Lancer chips report errors via the SLIPORT status/error registers in
 * BAR space; other chips report Unrecoverable Errors (UE) via PCI
 * config-space status registers, filtered through their mask registers.
 * Sets adapter->hw_error where the error is considered fatal, and takes
 * the carrier down if any real error was detected. No-op if a HW error
 * was already flagged.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-off bits are ignored */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2771
Sathya Perla8d56ff12009-11-22 22:02:26 +00002772static void be_msix_disable(struct be_adapter *adapter)
2773{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002774 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002775 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002776 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302777 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002778 }
2779}
2780
/* Enable MSI-X for the adapter.
 * Requests between MIN_MSIX_VECTORS and the computed maximum; when RoCE
 * is supported and enough vectors were granted, half are reserved for
 * RoCE and the rest for NIC queues. Returns 0 on success. On failure
 * returns the (negative) pci_enable_msix_range() error for VFs (which
 * cannot fall back to INTx) and 0 for PFs (INTx fallback possible).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2824
/* Return the Linux IRQ vector backing @eqo's MSI-X table entry. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2830
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees every IRQ requested so far, disables MSI-X and
 * returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs of all EQs before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2854
/* Register the adapter's interrupt handler(s).
 * Prefers MSI-X when enabled; falls back to a shared INTx line on the
 * first EQ for PFs (VFs have no INTx, so MSI-X failure is fatal there).
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2882
/* Free the IRQ(s) taken by be_irq_register(): either the single shared
 * INTx line on EQ0, or one MSI-X vector per event queue. Clears
 * adapter->isr_registered; no-op if nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2905
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002906static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002907{
2908 struct be_queue_info *q;
2909 struct be_rx_obj *rxo;
2910 int i;
2911
2912 for_all_rx_queues(adapter, rxo, i) {
2913 q = &rxo->q;
2914 if (q->created) {
2915 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002916 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002917 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002918 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002919 }
2920}
2921
/* ndo_stop handler: quiesce and tear down the data path in order —
 * RoCE close, NAPI/busy-poll disable, MCC async disable, TX drain,
 * RX queue destroy, unicast MAC cleanup, per-EQ IRQ sync + EQ drain,
 * and finally IRQ unregistration.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the extra unicast MACs (index 0 is the primary MAC) */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2971
/* Allocate and create all RX queues and configure RSS.
 * The default (non-RSS) RXQ is created first as FW expects, then the
 * RSS queues. With multiple RXQs, the RSS indirection table is filled
 * round-robin with the RSS queue ids and hashing is enabled (UDP
 * hashing too on non-BEx chips); with a single RXQ, RSS is disabled.
 * Finally a random RSS key is programmed and the rings are filled with
 * initial receive buffers. Returns 0 or a command error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the RSS queue ids evenly across the whole
		 * indirection table.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key that was programmed, for later reporting */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3037
/* ndo_open handler: bring up the data path — create RX queues, register
 * IRQs, arm all RX/TX CQs, enable async MCC events, enable NAPI and
 * busy-poll and arm each EQ, refresh link status, start the TX queues
 * and open the RoCE side (plus VXLAN port discovery on Skyhawk).
 * On any failure the partial bring-up is undone via be_close() and
 * -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3087
/* Enable or disable Wake-on-LAN (magic packet).
 * Allocates a DMA buffer for the FW command; on enable, programs the
 * PM control bits in PCI config space and arms magic-WoL on the current
 * netdev MAC plus D3hot/D3cold wake; on disable, clears magic-WoL
 * (all-zero MAC) and the wake settings. Returns 0 or an error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* An all-zero MAC is used to clear the magic-packet filter */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3127
/* Generate a seed MAC for VFs: the low three bytes come from a jhash of
 * the PF MAC, while the OUI (first three bytes) is copied from the
 * current PF MAC address.
 */
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}
3140
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips program the MAC via pmac-add; later chips use
		 * the set-mac command instead.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next MAC in sequence */
		mac[5] += 1;
	}
	return status;
}
3176
Sathya Perla4c876612013-02-03 20:30:11 +00003177static int be_vfs_mac_query(struct be_adapter *adapter)
3178{
3179 int status, vf;
3180 u8 mac[ETH_ALEN];
3181 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003182
3183 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303184 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3185 mac, vf_cfg->if_handle,
3186 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003187 if (status)
3188 return status;
3189 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3190 }
3191 return 0;
3192}
3193
/* Tear down SR-IOV state.
 * If VFs are still assigned to VMs, SR-IOV is left enabled (only the
 * bookkeeping is freed). Otherwise SR-IOV is disabled and each VF's
 * MAC and interface are removed in FW. Always frees the vf_cfg array
 * and clears the SR-IOV flag/count.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal method differs per chip family */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3222
/* Destroy all HW queue objects in the exact reverse order of their
 * creation in be_setup_queues(): MCC, RX CQs, TX, then event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3230
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303231static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003232{
Sathya Perla191eb752012-02-23 18:50:13 +00003233 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3234 cancel_delayed_work_sync(&adapter->work);
3235 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3236 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303237}
3238
Somnath Koturb05004a2013-12-05 12:08:16 +05303239static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303240{
3241 int i;
3242
Somnath Koturb05004a2013-12-05 12:08:16 +05303243 if (adapter->pmac_id) {
3244 for (i = 0; i < (adapter->uc_macs + 1); i++)
3245 be_cmd_pmac_del(adapter, adapter->if_handle,
3246 adapter->pmac_id[i], 0);
3247 adapter->uc_macs = 0;
3248
3249 kfree(adapter->pmac_id);
3250 adapter->pmac_id = NULL;
3251 }
3252}
3253
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: revert the interface from tunnel to normal
 * parsing, clear the VxLAN UDP port programmed in FW, reset the driver
 * flags/port, and strip the tunnel-offload feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303274
/* Undo everything be_setup() did, in reverse: stop the worker, tear down
 * VFs, VxLAN offloads and MAC filters, destroy the interface and all
 * queues, and disable MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3303
Kalesh AP0700d812015-01-20 03:51:43 -05003304static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3305 u32 cap_flags, u32 vf)
3306{
3307 u32 en_flags;
3308 int status;
3309
3310 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3311 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3312 BE_IF_FLAGS_RSS;
3313
3314 en_flags &= cap_flags;
3315
3316 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3317 if_handle, vf);
3318
3319 return status;
3320}
3321
/* Create one FW interface per VF. On non-BE3 chips the per-VF capability
 * flags are queried from the FW profile first; if that query fails (or on
 * BE3), the previously computed cap_flags — initially the fixed
 * untagged/broadcast/multicast set — are used instead.
 * Returns 0, or the first failing command status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3349
Sathya Perla39f1d942012-05-08 19:41:24 +00003350static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003351{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003352 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003353 int vf;
3354
Sathya Perla39f1d942012-05-08 19:41:24 +00003355 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3356 GFP_KERNEL);
3357 if (!adapter->vf_cfg)
3358 return -ENOMEM;
3359
Sathya Perla11ac75e2011-12-13 00:58:50 +00003360 for_all_vfs(adapter, vf_cfg, vf) {
3361 vf_cfg->if_handle = -1;
3362 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003363 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003364 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003365}
3366
/* Bring up SR-IOV VFs. If VFs already exist in FW (old_vfs != 0, e.g.
 * after a PF driver reload), their if-handles and MACs are re-queried;
 * otherwise VF interfaces are created and MACs programmed. Each VF is
 * then granted the FILTMGMT privilege if it lacks it, and — for newly
 * created VFs only — QoS, VF-enable and link-state defaults are applied
 * before SR-IOV is enabled on the PCI device.
 * On any failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: just re-discover their state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3441
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303442/* Converting function_mode bits on BE3 to SH mc_type enums */
3443
3444static u8 be_convert_mc_type(u32 function_mode)
3445{
Suresh Reddy66064db2014-06-23 16:41:29 +05303446 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303447 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303448 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303449 return FLEX10;
3450 else if (function_mode & VNIC_MODE)
3451 return vNIC2;
3452 else if (function_mode & UMC_ENABLED)
3453 return UMC;
3454 else
3455 return MC_NONE;
3456}
3457
/* On BE2/BE3 FW does not suggest the supported limits */
/* Compute the resource limits (uc/mc-macs, vlans, TX/RX/RSS/event
 * queues, if_cap_flags) in the driver, based on chip type, PF/VF role,
 * multi-channel mode and SR-IOV usage.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs get a larger uc-mac filter budget than VFs */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable PF with SR-IOV not in use;
	 * otherwise max_rss_qs stays 0 and only the default RXQ exists.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3525
Sathya Perla30128032011-11-10 19:17:57 +00003526static void be_setup_init(struct be_adapter *adapter)
3527{
3528 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003529 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003530 adapter->if_handle = -1;
3531 adapter->be3_native = false;
3532 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003533 if (be_physfn(adapter))
3534 adapter->cmd_privileges = MAX_PRIVILEGES;
3535 else
3536 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003537}
3538
/* Discover the SR-IOV pool resources / max-VF limit and decide how many
 * VFs to enable: a VF count already enabled in HW always wins over the
 * num_vfs module parameter, which is otherwise clamped to the maximum.
 * Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI config-space TotalVFs value */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3583
/* Populate adapter->res with this function's resource limits: computed
 * by the driver on BEx chips, queried from FW on newer chips (with half
 * the event queues reserved when RoCE may be enabled). Logs the final
 * maxima. Returns 0 or the FW-query status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3620
/* Query the SR-IOV configuration and, when VFs are not yet enabled,
 * ask FW to redistribute the PF-pool resources across the requested
 * number of VFs. Failures are logged but non-fatal to driver init.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3649
/* Read the FW/HW configuration: FW config, active profile (PF only),
 * SR-IOV settings (non-BE2 PF only), per-function resource limits, and
 * allocate the pmac_id table sized to the uc-mac limit.
 * Returns 0, a FW command status, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3683
Sathya Perla95046b92013-07-23 15:25:02 +05303684static int be_mac_setup(struct be_adapter *adapter)
3685{
3686 u8 mac[ETH_ALEN];
3687 int status;
3688
3689 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3690 status = be_cmd_get_perm_mac(adapter, mac);
3691 if (status)
3692 return status;
3693
3694 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3695 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3696 } else {
3697 /* Maybe the HW was reset; dev_addr must be re-programmed */
3698 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3699 }
3700
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003701 /* For BE3-R VFs, the PF programs the initial MAC address */
3702 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3703 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3704 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303705 return 0;
3706}
3707
/* Start the periodic (1 second) worker and record that it is running,
 * so be_cancel_worker() knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3713
/* Create all HW queues in dependency order — event queues first, then
 * TX, RX completion and MCC queues — and publish the actual RX/TX queue
 * counts to the network stack. be_setup() wraps this in rtnl_lock()
 * because netif_set_real_num_*_queues() requires it. On failure the
 * caller tears down any partially created queues (be_clear_queues()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3748
/* Re-create all queues after a queue-count/resource change: close the
 * device if it is up, stop the worker, tear down the queues, re-program
 * MSI-X if possible, create the queues again, restart the worker and
 * re-open the device. Returns 0 or the first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3784
/* Parse the leading "<major>." component of a FW version string.
 * Returns the major number, or 0 when the string does not start
 * with a decimal integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
3795
/* Full adapter initialization (probe and post-reset): read config,
 * enable MSI-X, create the interface and queues, program MAC, VLANs and
 * flow control, bring up VFs if requested, and start the periodic
 * worker. On failure everything is unwound via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 FW older than major version 4 may have IRQ problems; warn */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Apply the requested flow control; if FW rejects it, read back
	 * the settings FW is actually using.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3875
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: notify every event queue and schedule its NAPI
 * context so pending events are processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3889
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303890static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003891
Sathya Perla306f1342011-08-02 19:57:45 +00003892static bool phy_flashing_required(struct be_adapter *adapter)
3893{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05003894 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003895 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003896}
3897
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003898static bool is_comp_in_ufi(struct be_adapter *adapter,
3899 struct flash_section_info *fsec, int type)
3900{
3901 int i = 0, img_type = 0;
3902 struct flash_section_info_g2 *fsec_g2 = NULL;
3903
Sathya Perlaca34fe32012-11-06 17:48:56 +00003904 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003905 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3906
3907 for (i = 0; i < MAX_FLASH_COMP; i++) {
3908 if (fsec_g2)
3909 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3910 else
3911 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3912
3913 if (img_type == type)
3914 return true;
3915 }
3916 return false;
3917
3918}
3919
Jingoo Han4188e7d2013-08-05 18:02:02 +09003920static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303921 int header_size,
3922 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003923{
3924 struct flash_section_info *fsec = NULL;
3925 const u8 *p = fw->data;
3926
3927 p += header_size;
3928 while (p < (fw->data + fw->size)) {
3929 fsec = (struct flash_section_info *)p;
3930 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3931 return fsec;
3932 p += 32;
3933 }
3934 return NULL;
3935}
3936
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303937static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3938 u32 img_offset, u32 img_size, int hdr_size,
3939 u16 img_optype, bool *crc_match)
3940{
3941 u32 crc_offset;
3942 int status;
3943 u8 crc[4];
3944
3945 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3946 if (status)
3947 return status;
3948
3949 crc_offset = hdr_size + img_offset + img_size - 4;
3950
3951 /* Skip flashing, if crc of flashed region matches */
3952 if (!memcmp(crc, p + crc_offset, 4))
3953 *crc_match = true;
3954 else
3955 *crc_match = false;
3956
3957 return status;
3958}
3959
/* Write one firmware component to flash in chunks of up to 32KB.
 * Intermediate chunks use a SAVE op and the final chunk a FLASH op
 * (PHY firmware has its own pair of op codes). An ILLEGAL_REQUEST
 * status for a PHY f/w write ends the loop with success — presumably
 * when the card has no flashable PHY (TODO: confirm against FW spec).
 * Returns 0, or the failing write command's status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Select SAVE for intermediate chunks, FLASH for the last */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3997
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003998/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003999static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304000 const struct firmware *fw,
4001 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004002{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004003 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304004 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004005 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304006 int status, i, filehdr_size, num_comp;
4007 const struct flash_comp *pflashcomp;
4008 bool crc_match;
4009 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004010
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004011 struct flash_comp gen3_flash_types[] = {
4012 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4013 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4014 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4015 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4016 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4017 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4018 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4019 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4020 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4021 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4022 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4023 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4024 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4025 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4026 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4027 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4028 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4029 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4030 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4031 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004032 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004033
4034 struct flash_comp gen2_flash_types[] = {
4035 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4036 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4037 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4038 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4039 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4040 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4041 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4042 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4043 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4044 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4045 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4046 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4047 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4048 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4049 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4050 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004051 };
4052
Sathya Perlaca34fe32012-11-06 17:48:56 +00004053 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004054 pflashcomp = gen3_flash_types;
4055 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004056 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004057 } else {
4058 pflashcomp = gen2_flash_types;
4059 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004060 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004061 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004062 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004063
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004064 /* Get flash section info*/
4065 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4066 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304067 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004068 return -1;
4069 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004070 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004071 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004072 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004073
4074 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4075 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4076 continue;
4077
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004078 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4079 !phy_flashing_required(adapter))
4080 continue;
4081
4082 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304083 status = be_check_flash_crc(adapter, fw->data,
4084 pflashcomp[i].offset,
4085 pflashcomp[i].size,
4086 filehdr_size +
4087 img_hdrs_size,
4088 OPTYPE_REDBOOT, &crc_match);
4089 if (status) {
4090 dev_err(dev,
4091 "Could not get CRC for 0x%x region\n",
4092 pflashcomp[i].optype);
4093 continue;
4094 }
4095
4096 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004097 continue;
4098 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004099
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304100 p = fw->data + filehdr_size + pflashcomp[i].offset +
4101 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004102 if (p + pflashcomp[i].size > fw->data + fw->size)
4103 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004104
4105 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05304106 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004107 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304108 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004109 pflashcomp[i].img_type);
4110 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004111 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004112 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004113 return 0;
4114}
4115
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304116static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4117{
4118 u32 img_type = le32_to_cpu(fsec_entry.type);
4119 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4120
4121 if (img_optype != 0xFFFF)
4122 return img_optype;
4123
4124 switch (img_type) {
4125 case IMAGE_FIRMWARE_iSCSI:
4126 img_optype = OPTYPE_ISCSI_ACTIVE;
4127 break;
4128 case IMAGE_BOOT_CODE:
4129 img_optype = OPTYPE_REDBOOT;
4130 break;
4131 case IMAGE_OPTION_ROM_ISCSI:
4132 img_optype = OPTYPE_BIOS;
4133 break;
4134 case IMAGE_OPTION_ROM_PXE:
4135 img_optype = OPTYPE_PXE_BIOS;
4136 break;
4137 case IMAGE_OPTION_ROM_FCoE:
4138 img_optype = OPTYPE_FCOE_BIOS;
4139 break;
4140 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4141 img_optype = OPTYPE_ISCSI_BACKUP;
4142 break;
4143 case IMAGE_NCSI:
4144 img_optype = OPTYPE_NCSI_FW;
4145 break;
4146 case IMAGE_FLASHISM_JUMPVECTOR:
4147 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4148 break;
4149 case IMAGE_FIRMWARE_PHY:
4150 img_optype = OPTYPE_SH_PHY_FW;
4151 break;
4152 case IMAGE_REDBOOT_DIR:
4153 img_optype = OPTYPE_REDBOOT_DIR;
4154 break;
4155 case IMAGE_REDBOOT_CONFIG:
4156 img_optype = OPTYPE_REDBOOT_CONFIG;
4157 break;
4158 case IMAGE_UFI_DIR:
4159 img_optype = OPTYPE_UFI_DIR;
4160 break;
4161 default:
4162 break;
4163 }
4164
4165 return img_optype;
4166}
4167
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004168static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304169 const struct firmware *fw,
4170 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004171{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004172 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304173 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004174 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304175 u32 img_offset, img_size, img_type;
4176 int status, i, filehdr_size;
4177 bool crc_match, old_fw_img;
4178 u16 img_optype;
4179 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004180
4181 filehdr_size = sizeof(struct flash_file_hdr_g3);
4182 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4183 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304184 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304185 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004186 }
4187
4188 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4189 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4190 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304191 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4192 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4193 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004194
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304195 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004196 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304197 /* Don't bother verifying CRC if an old FW image is being
4198 * flashed
4199 */
4200 if (old_fw_img)
4201 goto flash;
4202
4203 status = be_check_flash_crc(adapter, fw->data, img_offset,
4204 img_size, filehdr_size +
4205 img_hdrs_size, img_optype,
4206 &crc_match);
4207 /* The current FW image on the card does not recognize the new
4208 * FLASH op_type. The FW download is partially complete.
4209 * Reboot the server now to enable FW image to recognize the
4210 * new FLASH op_type. To complete the remaining process,
4211 * download the same FW again after the reboot.
4212 */
Kalesh AP4c600052014-05-30 19:06:26 +05304213 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4214 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304215 dev_err(dev, "Flash incomplete. Reset the server\n");
4216 dev_err(dev, "Download FW image again after reset\n");
4217 return -EAGAIN;
4218 } else if (status) {
4219 dev_err(dev, "Could not get CRC for 0x%x region\n",
4220 img_optype);
4221 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004222 }
4223
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304224 if (crc_match)
4225 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004226
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304227flash:
4228 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004229 if (p + img_size > fw->data + fw->size)
4230 return -1;
4231
4232 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304233 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4234 * UFI_DIR region
4235 */
Kalesh AP4c600052014-05-30 19:06:26 +05304236 if (old_fw_img &&
4237 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4238 (img_optype == OPTYPE_UFI_DIR &&
4239 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304240 continue;
4241 } else if (status) {
4242 dev_err(dev, "Flashing section type 0x%x failed\n",
4243 img_type);
4244 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004245 }
4246 }
4247 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004248}
4249
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004250static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304251 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004252{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004253#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4254#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304255 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004256 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004257 const u8 *data_ptr = NULL;
4258 u8 *dest_image_ptr = NULL;
4259 size_t image_size = 0;
4260 u32 chunk_size = 0;
4261 u32 data_written = 0;
4262 u32 offset = 0;
4263 int status = 0;
4264 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004265 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004266
4267 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304268 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304269 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004270 }
4271
4272 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4273 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304274 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004275 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304276 if (!flash_cmd.va)
4277 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004278
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004279 dest_image_ptr = flash_cmd.va +
4280 sizeof(struct lancer_cmd_req_write_object);
4281 image_size = fw->size;
4282 data_ptr = fw->data;
4283
4284 while (image_size) {
4285 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4286
4287 /* Copy the image chunk content. */
4288 memcpy(dest_image_ptr, data_ptr, chunk_size);
4289
4290 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004291 chunk_size, offset,
4292 LANCER_FW_DOWNLOAD_LOCATION,
4293 &data_written, &change_status,
4294 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004295 if (status)
4296 break;
4297
4298 offset += data_written;
4299 data_ptr += data_written;
4300 image_size -= data_written;
4301 }
4302
4303 if (!status) {
4304 /* Commit the FW written */
4305 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004306 0, offset,
4307 LANCER_FW_DOWNLOAD_LOCATION,
4308 &data_written, &change_status,
4309 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004310 }
4311
Kalesh APbb864e02014-09-02 09:56:51 +05304312 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004313 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304314 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304315 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004316 }
4317
Kalesh APbb864e02014-09-02 09:56:51 +05304318 dev_info(dev, "Firmware flashed successfully\n");
4319
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004320 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304321 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004322 status = lancer_physdev_ctrl(adapter,
4323 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004324 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304325 dev_err(dev, "Adapter busy, could not reset FW\n");
4326 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004327 }
4328 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304329 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004330 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304331
4332 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004333}
4334
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004335#define BE2_UFI 2
4336#define BE3_UFI 3
4337#define BE3R_UFI 10
4338#define SH_UFI 4
4339
Sathya Perlaca34fe32012-11-06 17:48:56 +00004340static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004341 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004342{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004343 if (!fhdr) {
4344 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4345 return -1;
4346 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004347
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004348 /* First letter of the build version is used to identify
4349 * which chip this image file is meant for.
4350 */
4351 switch (fhdr->build[0]) {
4352 case BLD_STR_UFI_TYPE_SH:
4353 return SH_UFI;
4354 case BLD_STR_UFI_TYPE_BE3:
4355 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4356 BE3_UFI;
4357 case BLD_STR_UFI_TYPE_BE2:
4358 return BE2_UFI;
4359 default:
4360 return -1;
4361 }
4362}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004363
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004364/* Check if the flash image file is compatible with the adapter that
4365 * is being flashed.
4366 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
4367 */
4368static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4369 struct flash_file_hdr_g3 *fhdr)
4370{
4371 int ufi_type = be_get_ufi_type(adapter, fhdr);
4372
4373 switch (ufi_type) {
4374 case SH_UFI:
4375 return skyhawk_chip(adapter);
4376 case BE3R_UFI:
4377 return BE3_chip(adapter);
4378 case BE3_UFI:
4379 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4380 case BE2_UFI:
4381 return BE2_chip(adapter);
4382 default:
4383 return false;
4384 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004385}
4386
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004387static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4388{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004389 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004390 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004391 struct image_hdr *img_hdr_ptr;
4392 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004393 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004394
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004395 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4396 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4397 dev_err(dev, "Flash image is not compatible with adapter\n");
4398 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004399 }
4400
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004401 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4402 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4403 GFP_KERNEL);
4404 if (!flash_cmd.va)
4405 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004406
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004407 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4408 for (i = 0; i < num_imgs; i++) {
4409 img_hdr_ptr = (struct image_hdr *)(fw->data +
4410 (sizeof(struct flash_file_hdr_g3) +
4411 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004412 if (!BE2_chip(adapter) &&
4413 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4414 continue;
4415
4416 if (skyhawk_chip(adapter))
4417 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4418 num_imgs);
4419 else
4420 status = be_flash_BEx(adapter, fw, &flash_cmd,
4421 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004422 }
4423
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004424 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4425 if (!status)
4426 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004427
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004428 return status;
4429}
4430
4431int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4432{
4433 const struct firmware *fw;
4434 int status;
4435
4436 if (!netif_running(adapter->netdev)) {
4437 dev_err(&adapter->pdev->dev,
4438 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304439 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004440 }
4441
4442 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4443 if (status)
4444 goto fw_exit;
4445
4446 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4447
4448 if (lancer_chip(adapter))
4449 status = lancer_fw_download(adapter, fw);
4450 else
4451 status = be_fw_download(adapter, fw);
4452
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004453 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304454 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004455
Ajit Khaparde84517482009-09-04 03:12:16 +00004456fw_exit:
4457 release_firmware(fw);
4458 return status;
4459}
4460
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004461static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4462 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004463{
4464 struct be_adapter *adapter = netdev_priv(dev);
4465 struct nlattr *attr, *br_spec;
4466 int rem;
4467 int status = 0;
4468 u16 mode = 0;
4469
4470 if (!sriov_enabled(adapter))
4471 return -EOPNOTSUPP;
4472
4473 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004474 if (!br_spec)
4475 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004476
4477 nla_for_each_nested(attr, br_spec, rem) {
4478 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4479 continue;
4480
Thomas Grafb7c1a312014-11-26 13:42:17 +01004481 if (nla_len(attr) < sizeof(mode))
4482 return -EINVAL;
4483
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004484 mode = nla_get_u16(attr);
4485 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4486 return -EINVAL;
4487
4488 status = be_cmd_set_hsw_config(adapter, 0, 0,
4489 adapter->if_handle,
4490 mode == BRIDGE_MODE_VEPA ?
4491 PORT_FWD_TYPE_VEPA :
4492 PORT_FWD_TYPE_VEB);
4493 if (status)
4494 goto err;
4495
4496 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4497 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4498
4499 return status;
4500 }
4501err:
4502 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4503 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4504
4505 return status;
4506}
4507
4508static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304509 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004510{
4511 struct be_adapter *adapter = netdev_priv(dev);
4512 int status = 0;
4513 u8 hsw_mode;
4514
4515 if (!sriov_enabled(adapter))
4516 return 0;
4517
4518 /* BE and Lancer chips support VEB mode only */
4519 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4520 hsw_mode = PORT_FWD_TYPE_VEB;
4521 } else {
4522 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4523 adapter->if_handle, &hsw_mode);
4524 if (status)
4525 return 0;
4526 }
4527
4528 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4529 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004530 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4531 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004532}
4533
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304534#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004535/* VxLAN offload Notes:
4536 *
4537 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4538 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4539 * is expected to work across all types of IP tunnels once exported. Skyhawk
4540 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304541 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4542 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4543 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004544 *
4545 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4546 * adds more than one port, disable offloads and don't re-enable them again
4547 * until after all the tunnels are removed.
4548 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304549static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4550 __be16 port)
4551{
4552 struct be_adapter *adapter = netdev_priv(netdev);
4553 struct device *dev = &adapter->pdev->dev;
4554 int status;
4555
4556 if (lancer_chip(adapter) || BEx_chip(adapter))
4557 return;
4558
4559 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304560 dev_info(dev,
4561 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004562 dev_info(dev, "Disabling VxLAN offloads\n");
4563 adapter->vxlan_port_count++;
4564 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304565 }
4566
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004567 if (adapter->vxlan_port_count++ >= 1)
4568 return;
4569
Sathya Perlac9c47142014-03-27 10:46:19 +05304570 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4571 OP_CONVERT_NORMAL_TO_TUNNEL);
4572 if (status) {
4573 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4574 goto err;
4575 }
4576
4577 status = be_cmd_set_vxlan_port(adapter, port);
4578 if (status) {
4579 dev_warn(dev, "Failed to add VxLAN port\n");
4580 goto err;
4581 }
4582 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4583 adapter->vxlan_port = port;
4584
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004585 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4586 NETIF_F_TSO | NETIF_F_TSO6 |
4587 NETIF_F_GSO_UDP_TUNNEL;
4588 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304589 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004590
Sathya Perlac9c47142014-03-27 10:46:19 +05304591 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4592 be16_to_cpu(port));
4593 return;
4594err:
4595 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304596}
4597
4598static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4599 __be16 port)
4600{
4601 struct be_adapter *adapter = netdev_priv(netdev);
4602
4603 if (lancer_chip(adapter) || BEx_chip(adapter))
4604 return;
4605
4606 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004607 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304608
4609 be_disable_vxlan_offloads(adapter);
4610
4611 dev_info(&adapter->pdev->dev,
4612 "Disabled VxLAN offloads for UDP port %d\n",
4613 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004614done:
4615 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304616}
Joe Stringer725d5482014-11-13 16:38:13 -08004617
Jesse Gross5f352272014-12-23 22:37:26 -08004618static netdev_features_t be_features_check(struct sk_buff *skb,
4619 struct net_device *dev,
4620 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004621{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304622 struct be_adapter *adapter = netdev_priv(dev);
4623 u8 l4_hdr = 0;
4624
4625 /* The code below restricts offload features for some tunneled packets.
4626 * Offload features for normal (non tunnel) packets are unchanged.
4627 */
4628 if (!skb->encapsulation ||
4629 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4630 return features;
4631
4632 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4633 * should disable tunnel offload features if it's not a VxLAN packet,
4634 * as tunnel offloads have been enabled only for VxLAN. This is done to
4635 * allow other tunneled traffic like GRE work fine while VxLAN
4636 * offloads are configured in Skyhawk-R.
4637 */
4638 switch (vlan_get_protocol(skb)) {
4639 case htons(ETH_P_IP):
4640 l4_hdr = ip_hdr(skb)->protocol;
4641 break;
4642 case htons(ETH_P_IPV6):
4643 l4_hdr = ipv6_hdr(skb)->nexthdr;
4644 break;
4645 default:
4646 return features;
4647 }
4648
4649 if (l4_hdr != IPPROTO_UDP ||
4650 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4651 skb->inner_protocol != htons(ETH_P_TEB) ||
4652 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4653 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4654 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4655
4656 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08004657}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304658#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304659
stephen hemmingere5686ad2012-01-05 19:10:25 +00004660static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004661 .ndo_open = be_open,
4662 .ndo_stop = be_close,
4663 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004664 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004665 .ndo_set_mac_address = be_mac_addr_set,
4666 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004667 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004668 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004669 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4670 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004671 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004672 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004673 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004674 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304675 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004676#ifdef CONFIG_NET_POLL_CONTROLLER
4677 .ndo_poll_controller = be_netpoll,
4678#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004679 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4680 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304681#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304682 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304683#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304684#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304685 .ndo_add_vxlan_port = be_add_vxlan_port,
4686 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08004687 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304688#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004689};
4690
/* Populate netdev feature flags and attach the driver's netdev/ethtool ops.
 * Runs once per function at probe time, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Features the admin may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing is advertised only when multiple RX queues are in use */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enabled features: everything togglable, plus VLAN RX stripping
	 * and filtering which are kept on but not user-togglable here.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so segment payload + Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4717
4718static void be_unmap_pci_bars(struct be_adapter *adapter)
4719{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004720 if (adapter->csr)
4721 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004722 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004723 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004724}
4725
/* Return the PCI BAR number holding the doorbell region:
 * BAR 0 for Lancer chips and for VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4733
4734static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004735{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004736 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004737 adapter->roce_db.size = 4096;
4738 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4739 db_bar(adapter));
4740 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4741 db_bar(adapter));
4742 }
Parav Pandit045508a2012-03-26 14:27:13 +00004743 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004744}
4745
4746static int be_map_pci_bars(struct be_adapter *adapter)
4747{
4748 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004749
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004750 if (BEx_chip(adapter) && be_physfn(adapter)) {
4751 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304752 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004753 return -ENOMEM;
4754 }
4755
Sathya Perlace66f782012-11-06 17:48:58 +00004756 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304757 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004758 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004759 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004760
4761 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004762 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004763
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004764pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304765 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004766 be_unmap_pci_bars(adapter);
4767 return -ENOMEM;
4768}
4769
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004770static void be_ctrl_cleanup(struct be_adapter *adapter)
4771{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004772 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004773
4774 be_unmap_pci_bars(adapter);
4775
4776 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004777 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4778 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004779
Sathya Perla5b8821b2011-08-02 19:57:44 +00004780 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004781 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004782 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4783 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004784}
4785
/* One-time control-path init: read SLI identity, map BARs, allocate the
 * FW mailbox and RX-filter DMA buffers, and init the locks/completion
 * used for issuing FW commands.
 * Returns 0 on success or a negative errno; on failure everything
 * already acquired is released via the labelled cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Cache SLI family and PF/VF identity from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to a 16-byte boundary below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Snapshot config space for later pci_restore_state() on recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4844
4845static void be_stats_cleanup(struct be_adapter *adapter)
4846{
Sathya Perla3abcded2010-10-03 22:12:27 -07004847 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004848
4849 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004850 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4851 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004852}
4853
4854static int be_stats_init(struct be_adapter *adapter)
4855{
Sathya Perla3abcded2010-10-03 22:12:27 -07004856 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004857
Sathya Perlaca34fe32012-11-06 17:48:56 +00004858 if (lancer_chip(adapter))
4859 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4860 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004861 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004862 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004863 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004864 else
4865 /* ALL non-BE ASICs */
4866 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004867
Joe Perchesede23fa82013-08-26 22:45:23 -07004868 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4869 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304870 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304871 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004872 return 0;
4873}
4874
/* PCI remove callback: unwind everything be_probe() set up.
 * Order matters: detach RoCE and mask interrupts first, stop the
 * recovery worker, unregister the netdev, then release FW/DMA/PCI
 * resources before freeing the netdev itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4905
Sathya Perla39f1d942012-05-08 19:41:24 +00004906static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004907{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304908 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004909
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004910 status = be_cmd_get_cntl_attributes(adapter);
4911 if (status)
4912 return status;
4913
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004914 /* Must be a power of 2 or else MODULO will BUG_ON */
4915 adapter->be_get_temp_freq = 64;
4916
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304917 if (BEx_chip(adapter)) {
4918 level = be_cmd_get_fw_log_level(adapter);
4919 adapter->msg_enable =
4920 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4921 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004922
Sathya Perla92bf14a2013-08-27 16:57:32 +05304923 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004924 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004925}
4926
/* Attempt to recover a Lancer chip after an error: wait for FW to reach
 * the ready state, tear down the interface, clear recorded error state
 * and re-run setup (re-opening the netdev if it was running).
 * Returns 0 on success; -EAGAIN means FW resources are not yet
 * provisioned and the caller should retry later.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4963
/* Periodic (1s) worker that polls for HW errors and, on Lancer chips,
 * drives the recovery sequence with the netdev detached. Reschedules
 * itself unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl so the stack stops using the device
		 * while recovery runs.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4989
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes HW stats and die temperature, replenishes
 * RX queues that starved on allocation failure, and updates EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only once the previous one finished */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq ticks, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5032
Sathya Perla257a3fe2013-06-14 15:54:51 +05305033/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00005034static bool be_reset_required(struct be_adapter *adapter)
5035{
Sathya Perla257a3fe2013-06-14 15:54:51 +05305036 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00005037}
5038
Sathya Perlad3791422012-09-28 04:39:44 +00005039static char *mc_name(struct be_adapter *adapter)
5040{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305041 char *str = ""; /* default */
5042
5043 switch (adapter->mc_type) {
5044 case UMC:
5045 str = "UMC";
5046 break;
5047 case FLEX10:
5048 str = "FLEX10";
5049 break;
5050 case vNIC1:
5051 str = "vNIC-1";
5052 break;
5053 case nPAR:
5054 str = "nPAR";
5055 break;
5056 case UFP:
5057 str = "UFP";
5058 break;
5059 case vNIC2:
5060 str = "vNIC-2";
5061 break;
5062 default:
5063 str = "";
5064 }
5065
5066 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005067}
5068
/* Label for log messages: physical vs virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5073
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005074static inline char *nic_name(struct pci_dev *pdev)
5075{
5076 switch (pdev->device) {
5077 case OC_DEVICE_ID1:
5078 return OC_NAME;
5079 case OC_DEVICE_ID2:
5080 return OC_NAME_BE;
5081 case OC_DEVICE_ID3:
5082 case OC_DEVICE_ID4:
5083 return OC_NAME_LANCER;
5084 case BE_DEVICE_ID2:
5085 return BE3_NAME;
5086 case OC_DEVICE_ID5:
5087 case OC_DEVICE_ID6:
5088 return OC_NAME_SH;
5089 default:
5090 return BE_NAME;
5091 }
5092}
5093
/* PCI probe: bring up one newly discovered NIC function.
 * Enables the PCI device, allocates the netdev/adapter, sets the DMA
 * mask, maps BARs and control structures, syncs with FW (resetting the
 * function when safe), creates queues via be_setup() and registers the
 * netdev. Failures unwind through the labelled cleanup chain at the
 * bottom. Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: probe continues even if it is unavailable */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to on in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5216
/* Legacy PM suspend: arm wake-on-LAN if enabled, mask interrupts, stop
 * the recovery worker, quiesce and tear down the interface, then put
 * the PCI device into the requested low-power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5241
/* Legacy PM resume: power the PCI device back up, wait for FW, reset
 * and re-init the function, rebuild HW resources with be_setup(),
 * restart the interface if it was running and disarm wake-on-LAN.
 * Returns 0 on success or a negative errno from an early failure.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s status is not checked here, unlike in
	 * be_probe(); resume proceeds regardless.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5287
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop both periodic workers before touching the HW */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function reset stops any in-flight DMA before power-off */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5308
/* EEH/AER callback: a PCI channel error was detected. Quiesce the
 * driver exactly once (guarded by adapter->eeh_error), then tell the
 * EEH core whether to attempt a slot reset or give up on the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5347
/* EEH slot-reset callback: re-enable and restore the device after the
 * slot reset, then verify FW reaches the ready state before reporting
 * the link as recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5374
/* EEH resume callback: traffic may flow again. Reset and re-init the
 * function, rebuild HW resources, restart the interface if it was
 * running, restart the recovery worker and reattach the netdev.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5417
/* PCI error-recovery (EEH/AER) callback table */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5423
/* PCI driver registration: probe/remove, legacy PM hooks, shutdown and
 * error-recovery handlers.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5434
5435static int __init be_init_module(void)
5436{
Joe Perches8e95a202009-12-03 07:58:21 +00005437 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5438 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005439 printk(KERN_WARNING DRV_NAME
5440 " : Module param rx_frag_size must be 2048/4096/8192."
5441 " Using 2048\n");
5442 rx_frag_size = 2048;
5443 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005444
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005445 return pci_register_driver(&be_driver);
5446}
5447module_init(be_init_module);
5448
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);