blob: 3162a41c3c3a2b8b7412ac42671cca4f2a845f8b [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
/* PCI device IDs claimed by this driver; the table is zero-terminated and
 * exported via MODULE_DEVICE_TABLE for module autoloading.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the Unrecoverable Error status (low) register;
 * array index corresponds to the bit position in the CSR.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* UE Status High CSR */
/* Human-readable names for the Unrecoverable Error status (high) register;
 * array index corresponds to the bit position in the CSR.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* ndo_set_mac_address handler: programs a new MAC via FW commands.
 * The new MAC is considered active only after re-reading it back from FW,
 * since a VF's PMAC_ADD may fail (or silently be superseded) depending on
 * privileges and PF provisioning.
 * Returns 0 on success or a negative errno on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0 (BE2) GET_STATS response fields into the driver's
 * chip-agnostic stats block (adapter->drv_stats), byte-swapping the
 * response to CPU endianness first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and VLAN-filter drops separately; fold them
	 * into the single driver counter
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps per-port jabber counters in the rxf block, not the
	 * per-port stats block
	 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) GET_STATS response fields into the driver's
 * chip-agnostic stats block (adapter->drv_stats), byte-swapping the
 * response to CPU endianness first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filter-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 GET_STATS response fields into the driver's chip-agnostic
 * stats block (adapter->drv_stats), byte-swapping the response to CPU
 * endianness first. v2 additionally carries RoCE counters, copied only
 * when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer per-physical-port (pport) stats response into the
 * driver's chip-agnostic stats block (adapter->drv_stats), byte-swapping
 * the response to CPU endianness first. Lancer uses a different response
 * layout from the BEx v0/v1/v2 formats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like BEx v0, Lancer reports address- and VLAN-filter drops
	 * separately; fold them into the single driver counter
	 */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): rx_fifo_overflow feeds both this and
	 * rx_input_fifo_overflow_drop above — Lancer has no separate
	 * rxpp counter, so both driver fields mirror the same HW counter.
	 */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
/* Parse the raw GET_STATS FW response into adapter->drv_stats, dispatching
 * to the populate_* helper that matches the chip generation, then update
 * the per-RX-queue erx drop counters (non-Lancer chips only).
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
577
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the FW-derived error counters (adapter->drv_stats) into @stats.
 * Per-queue counters are read under a u64_stats seqcount retry loop so
 * that 64-bit values are consistent even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Populate the Tx header WRB for an skb: CRC/checksum/LSO offload flags,
 * VLAN tag, total number of WRBs and total payload length.
 * @wrb_cnt: number of WRBs (incl. this header and any dummy WRB)
 * @len:     number of payload bytes mapped for this skb
 * @skip_hw_vlan: when true, tell the f/w not to insert a HW VLAN tag
 *		  (encoded as event=1, complete=0 — see below)
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to the h/w; lso6 flags an IPv6 LSO
		 * on chips other than Lancer
		 */
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* csum offload: for tunnelled pkts checksum the inner
		 * headers (and also the outer IP header via ipcs)
		 */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}
773
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530775 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000776{
777 dma_addr_t dma;
778
779 be_dws_le_to_cpu(wrb, sizeof(*wrb));
780
781 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000782 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000783 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000784 dma_unmap_single(dev, dma, wrb->frag_len,
785 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000786 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000787 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000788 }
789}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700790
/* DMA-map the skb and post its WRBs on the tx queue: a header WRB
 * (reserved first, filled last once the total length is known), one data
 * WRB for the linear part (if any), one per page fragment, and an
 * optional dummy WRB for even-count padding.
 * Returns the number of payload bytes mapped, or 0 on a DMA mapping
 * error, in which case all mappings made so far are unwound and the
 * queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB slot; remember where data WRBs start so
	 * the error path can walk them
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	/* linear portion of the skb, if present */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length padding WRB to make the count even (see
	 * wrb_cnt_for_skb())
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	/* now that 'copied' is final, fill the reserved header WRB */
	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first data WRB and unmap everything posted so far;
	 * only the first WRB can be a dma_map_single() mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert VLAN tag(s) into the packet data in s/w (instead of h/w tagging).
 * May insert the pvid after a qnq async event, and additionally the outer
 * qnq vid. Each insertion may reallocate (or free) the skb, so the
 * returned skb must be used by the caller; NULL means the skb was lost.
 * *skip_hw_vlan is set when the f/w must be told to skip HW insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* get a private copy if the skb is shared; we modify its data */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkts get the port vid in qnq mode */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the pkt data; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply BEx/Lancer chip-errata workarounds to an skb before transmit:
 * trim h/w padding on short IPv4 pkts, decide whether HW VLAN tagging
 * must be skipped, and insert VLAN tags in s/w where the h/w would
 * mis-handle them. Returns the (possibly reallocated) skb, or NULL if
 * the pkt had to be dropped or was lost during tag insertion.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the pkt back to eth-hdr + IP tot_len to drop the padding.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply workarounds, post the skb's WRBs on the
 * mapped tx queue and ring the doorbell. Always returns NETDEV_TX_OK;
 * pkts that cannot be sent are dropped and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* hdr-WRB slot; also the rollback point */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds consumed/dropped the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: make_tx_wrbs() already unwound its
		 * mappings; restore the queue head and drop the pkt
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* Program the currently-tracked VLAN ids (adapter->vids) into the h/w
 * filter; falls back to (or recovers from) VLAN-promiscuous mode when
 * the filter cannot hold them. Returns 0 or a negative/f-w status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already promiscuous: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
Patrick McHardy80d5c362013-04-19 02:04:28 +00001148static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001151 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001153 /* Packets with VID 0 are always received by Lancer by default */
1154 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301155 return status;
1156
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301157 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301158 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001159
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301160 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301161 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001162
Somnath Kotura6b74e02014-01-21 15:50:55 +05301163 status = be_vid_config(adapter);
1164 if (status) {
1165 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301166 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301167 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301168
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001169 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170}
1171
Patrick McHardy80d5c362013-04-19 02:04:28 +00001172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
1175
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301178 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001179
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301180 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301181 adapter->vlans_added--;
1182
1183 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184}
1185
Somnath kotur7ad09452014-03-03 14:24:43 +05301186static void be_clear_promisc(struct be_adapter *adapter)
1187{
1188 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301189 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301190
1191 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1192}
1193
/* ndo_set_rx_mode handler: sync the h/w rx filter with the netdev's
 * flags and uc/mc address lists. Falls back to promiscuous mode when
 * the uc list exceeds the h/w limit, and to mcast-promiscuous mode when
 * the mc list exceeds the limit or programming it fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* uc list changed: delete all programmed uc MACs (except the
	 * primary MAC in slot 0) and re-add the current list
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many uc MACs for the h/w: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* mc list programmed fine: leave mcast-promisc if set */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1260
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001261static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1262{
1263 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001264 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001265 int status;
1266
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001268 return -EPERM;
1269
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001271 return -EINVAL;
1272
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301273 /* Proceed further only if user provided MAC is different
1274 * from active MAC
1275 */
1276 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1277 return 0;
1278
Sathya Perla3175d8c2013-07-23 15:25:03 +05301279 if (BEx_chip(adapter)) {
1280 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1281 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001282
Sathya Perla11ac75e2011-12-13 00:58:50 +00001283 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1284 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301285 } else {
1286 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1287 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001288 }
1289
Kalesh APabccf232014-07-17 16:20:24 +05301290 if (status) {
1291 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1292 mac, vf, status);
1293 return be_cmd_status(status);
1294 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001295
Kalesh APabccf232014-07-17 16:20:24 +05301296 ether_addr_copy(vf_cfg->mac_addr, mac);
1297
1298 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001299}
1300
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001301static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301302 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001303{
1304 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001305 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001306
Sathya Perla11ac75e2011-12-13 00:58:50 +00001307 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001308 return -EPERM;
1309
Sathya Perla11ac75e2011-12-13 00:58:50 +00001310 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001311 return -EINVAL;
1312
1313 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001314 vi->max_tx_rate = vf_cfg->tx_rate;
1315 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001316 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1317 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001318 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301319 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001320
1321 return 0;
1322}
1323
Sathya Perla748b5392014-05-09 13:29:13 +05301324static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001325{
1326 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001327 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001328 int status = 0;
1329
Sathya Perla11ac75e2011-12-13 00:58:50 +00001330 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001331 return -EPERM;
1332
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001333 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001334 return -EINVAL;
1335
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001336 if (vlan || qos) {
1337 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301338 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001339 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1340 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001341 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001342 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301343 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1344 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001345 }
1346
Kalesh APabccf232014-07-17 16:20:24 +05301347 if (status) {
1348 dev_err(&adapter->pdev->dev,
1349 "VLAN %d config on VF %d failed : %#x\n", vlan,
1350 vf, status);
1351 return be_cmd_status(status);
1352 }
1353
1354 vf_cfg->vlan_tag = vlan;
1355
1356 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001357}
1358
/* Cap the TX rate of VF @vf at @max_tx_rate Mbps.
 * A non-zero @min_tx_rate is rejected (not supported by the FW);
 * @max_tx_rate == 0 removes the cap.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate removal needs no link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	/* The cap must lie between 100 Mbps and the current link speed */
	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the applied rate so be_get_vf_config() can report it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301420static int be_set_vf_link_state(struct net_device *netdev, int vf,
1421 int link_state)
1422{
1423 struct be_adapter *adapter = netdev_priv(netdev);
1424 int status;
1425
1426 if (!sriov_enabled(adapter))
1427 return -EPERM;
1428
1429 if (vf >= adapter->num_vfs)
1430 return -EINVAL;
1431
1432 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301433 if (status) {
1434 dev_err(&adapter->pdev->dev,
1435 "Link state change on VF %d failed: %#x\n", vf, status);
1436 return be_cmd_status(status);
1437 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301438
Kalesh APabccf232014-07-17 16:20:24 +05301439 adapter->vf_cfg[vf].plink_tracking = link_state;
1440
1441 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301442}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001443
Sathya Perla2632baf2013-10-01 16:00:00 +05301444static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1445 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446{
Sathya Perla2632baf2013-10-01 16:00:00 +05301447 aic->rx_pkts_prev = rx_pkts;
1448 aic->tx_reqs_prev = tx_pkts;
1449 aic->jiffies = now;
1450}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001451
/* Adaptive interrupt coalescing: for every event queue, derive a new EQ
 * delay from the rx+tx packet rate observed since the last sample and
 * push all changed delays to the FW in one batched command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: fall back to the static et_eqd value */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the 64-bit counters under the u64_stats sync loop */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second since the last sample */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		/* Clamp the computed delay to the configured min/max */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue only the EQs whose delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1518
Sathya Perla3abcded2010-10-03 22:12:27 -07001519static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301520 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001521{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001522 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001523
Sathya Perlaab1594e2011-07-25 19:10:15 +00001524 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001525 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001526 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001527 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001528 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001529 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001530 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001531 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001533}
1534
Sathya Perla2e588f82011-03-11 02:49:26 +00001535static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001536{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001537 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301538 * Also ignore ipcksm for ipv6 pkts
1539 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001540 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301541 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001542}
1543
/* Consume one rx-frag entry from the tail of the rx queue and make its
 * page data visible to the CPU. The caller owns the returned page_info
 * (and its page reference) afterwards.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the page: unmap the whole DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared with later frags: only sync this frag */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Slot handed to the caller; advance the queue bookkeeping */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1569
1570/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001571static void be_rx_compl_discard(struct be_rx_obj *rxo,
1572 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001575 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001576
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001577 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301578 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001579 put_page(page_info->page);
1580 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 }
1582}
1583
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area;
		 * the rest of the first frag stays as a page fragment
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page reference now owned by the skb (or released above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as slot j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1658
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and free the HW frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the netdev has RXCSUM enabled and
	 * the completion flags say it is reliable for this packet
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1694
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001695/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001696static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1697 struct napi_struct *napi,
1698 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001700 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001702 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00001703 u16 remaining, curr_frag_len;
1704 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001705
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001706 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001707 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001708 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001709 return;
1710 }
1711
Sathya Perla2e588f82011-03-11 02:49:26 +00001712 remaining = rxcp->pkt_size;
1713 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301714 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715
1716 curr_frag_len = min(remaining, rx_frag_size);
1717
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001718 /* Coalesce all frags from the same physical page in one slot */
1719 if (i == 0 || page_info->page_offset == 0) {
1720 /* First frag or Fresh page */
1721 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001722 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001723 skb_shinfo(skb)->frags[j].page_offset =
1724 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001725 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001726 } else {
1727 put_page(page_info->page);
1728 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001729 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001730 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001732 memset(page_info, 0, sizeof(*page_info));
1733 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001734 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001735
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001736 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001737 skb->len = rxcp->pkt_size;
1738 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001739 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001740 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001741 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001742 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301743
Tom Herbertb6c0e892014-08-27 21:27:17 -07001744 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301745 skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001746
Jiri Pirko343e43c2011-08-25 02:50:51 +00001747 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001748 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001749
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001750 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751}
1752
/* Decode a v1 rx completion descriptor (used when be3_native is set,
 * see be_rx_compl_get()) into the driver's sw rxcp structure.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001775
/* Decode a v0 rx completion descriptor (the non be3_native layout,
 * see be_rx_compl_get()) into the driver's sw rxcp structure.
 * Unlike v1 this layout carries an ip_frag bit and no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1797
/* Fetch and parse the next rx completion from the CQ tail.
 * Returns NULL when no valid completion is pending; otherwise returns
 * the per-rxo sw copy (rxo->rxcp) and consumes the CQ entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the transparent pvid tag unless explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1842
Eric Dumazet1829b082011-03-01 05:48:12 +00001843static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001845 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001846
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001848 gfp |= __GFP_COMP;
1849 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001850}
1851
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post at most MAX_RX_POST frags, stopping early when we wrap
	 * around to a slot whose buffer has not yet been consumed
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page; it is carved into
			 * rx_frag_size chunks by subsequent iterations
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current page; take a ref so
			 * each frag independently pins the page
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* No: close out this page. Only the last frag
			 * records the page's base DMA address for unmapping.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1930
/* Fetch the next valid TX completion entry from @tx_cq, or NULL if HW has
 * not posted a new one. The entry is consumed: its valid dword is cleared
 * and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	/* HW writes the valid dword last; zero means no new completion */
	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the entry
	 * that HW DMA'ed into host memory
	 */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid dword so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1946
/* Reclaim the skb whose wrbs end at @last_index in @txo's TXQ: unmap every
 * frag wrb from the current TXQ tail up to @last_index and free the skb.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit them back to the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The linear (header) part of the skb is unmapped only
		 * together with the first frag wrb
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1978
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* A zero evt dword means HW has not posted a new entry */
		if (eqe->evt == 0)
			break;

		/* Order the evt read before clearing the entry for reuse */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1998
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001999/* Leaves the EQ is disarmed state */
2000static void be_eq_clean(struct be_eq_obj *eqo)
2001{
2002 int num = events_get(eqo);
2003
2004 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2005}
2006
/* Drain the RX CQ and free all RX buffers still posted to HW.
 * Called on queue teardown; leaves the CQ unarmed and the RXQ empty
 * with head/tail reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2055
/* Drain TX completions from all TXQs on teardown, then forcibly reclaim
 * any posted skbs for which completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the silence
				 * timeout
				 */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the last wrb index of this skb so
			 * be_tx_compl_process() can walk and unmap it
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2113
/* Destroy all event queues: drain each created EQ, destroy it in HW,
 * detach its NAPI context and free the backing queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Queue memory is freed even if HW creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2129
/* Allocate and create the event queues, register a NAPI context for each,
 * and initialize adaptive interrupt coalescing (AIC) state.
 * Returns 0 on success or a negative status from queue alloc/create;
 * partially created queues are cleaned up by be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* One EQ per available IRQ, capped by the configured queue count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2163
Sathya Perla5fb379e2009-06-18 00:02:59 +00002164static void be_mcc_queues_destroy(struct be_adapter *adapter)
2165{
2166 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002167
Sathya Perla8788fdc2009-07-27 22:52:03 +00002168 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002169 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002170 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002171 be_queue_free(adapter, q);
2172
Sathya Perla8788fdc2009-07-27 22:52:03 +00002173 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002174 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002175 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002176 be_queue_free(adapter, q);
2177}
2178
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Create the MCC CQ first; the MCC queue posts completions to it */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of the steps that succeeded */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2211
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002212static void be_tx_queues_destroy(struct be_adapter *adapter)
2213{
2214 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002215 struct be_tx_obj *txo;
2216 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002217
Sathya Perla3c8def92011-06-12 20:01:58 +00002218 for_all_tx_queues(adapter, txo, i) {
2219 q = &txo->q;
2220 if (q->created)
2221 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2222 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002223
Sathya Perla3c8def92011-06-12 20:01:58 +00002224 q = &txo->cq;
2225 if (q->created)
2226 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2227 be_queue_free(adapter, q);
2228 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002229}
2230
/* Allocate and create the TX completion queues and TX WRB queues.
 * Returns 0 on success or a negative status; partially created queues
 * are cleaned up by be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	/* No more TXQs than EQs or than the HW profile allows */
	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2271
2272static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273{
2274 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002275 struct be_rx_obj *rxo;
2276 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277
Sathya Perla3abcded2010-10-03 22:12:27 -07002278 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002279 q = &rxo->cq;
2280 if (q->created)
2281 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2282 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002283 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284}
2285
/* Decide the number of RX queues and allocate/create their completion
 * queues. Returns 0 on success or a negative status; partially created
 * CQs are cleaned up by be_rx_cqs_destroy().
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* Size of the big page each RX buffer page is carved from */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2322
/* Legacy INTx interrupt handler: count pending events, schedule NAPI and
 * ack the events without re-arming the EQ (NAPI re-arms on completion).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2354
/* MSI-x interrupt handler: ack the EQ without re-arming it and hand
 * event/completion processing over to NAPI (be_poll).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2363
Sathya Perla2e588f82011-03-11 02:49:26 +00002364static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002365{
Somnath Koture38b1702013-05-29 22:55:56 +00002366 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367}
2368
/* Consume up to @budget RX completions from @rxo, passing good frames up
 * the stack (via GRO when eligible and not busy-polling) and discarding
 * flush/partial/mis-filtered completions. Returns the number of
 * completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2424
Kalesh AP512bb8a2014-09-02 09:56:49 +05302425static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2426{
2427 switch (status) {
2428 case BE_TX_COMP_HDR_PARSE_ERR:
2429 tx_stats(txo)->tx_hdr_parse_err++;
2430 break;
2431 case BE_TX_COMP_NDMA_ERR:
2432 tx_stats(txo)->tx_dma_err++;
2433 break;
2434 case BE_TX_COMP_ACL_ERR:
2435 tx_stats(txo)->tx_spoof_check_err++;
2436 break;
2437 }
2438}
2439
2440static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2441{
2442 switch (status) {
2443 case LANCER_TX_COMP_LSO_ERR:
2444 tx_stats(txo)->tx_tso_err++;
2445 break;
2446 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2447 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2448 tx_stats(txo)->tx_spoof_check_err++;
2449 break;
2450 case LANCER_TX_COMP_QINQ_ERR:
2451 tx_stats(txo)->tx_qinq_err++;
2452 break;
2453 case LANCER_TX_COMP_PARITY_ERR:
2454 tx_stats(txo)->tx_internal_parity_err++;
2455 break;
2456 case LANCER_TX_COMP_DMA_ERR:
2457 tx_stats(txo)->tx_dma_err++;
2458 break;
2459 }
2460}
2461
/* Reclaim up to @budget TX completions from @txo, updating error stats and
 * waking the netdev subqueue @idx if it was stopped for lack of wrbs.
 * Returns true when fewer than @budget completions were found (i.e. TX
 * work for this queue is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;
	u32 compl_status;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						GET_TX_COMPL_BITS(wrb_index,
								  txcp));
		/* A non-zero status indicates a TX error; account it in
		 * the chip-specific error counters
		 */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002502
/* NAPI poll handler for one EQ: services all TXQs and RXQs mapped to this
 * EQ (and MCC completions on the MCC EQ). Re-arms the EQ only when all
 * work fit within @budget; otherwise stays in polling mode.
 * Returns the amount of work done (NAPI contract).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX work forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the queues; report full budget so NAPI
		 * polls again instead of completing
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Ack events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2547
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll (low-latency socket) handler for the RX queues on this EQ.
 * Returns the number of packets processed, or LL_FLUSH_BUSY when the EQ
 * is currently owned by the regular NAPI path.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* small fixed budget (4) per queue; stop at the first
		 * queue that yields any work
		 */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2569
/* Check the adapter for unrecoverable hardware/firmware errors.
 * Lancer chips report errors via SLIPORT registers in BAR space;
 * other chips expose UE (unrecoverable error) status via PCI config
 * space. If a real error is found, the carrier is turned off.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already recorded earlier; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* mask off bits the FW marked as don't-care */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* decode each set bit into its symbolic name */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2645
Sathya Perla8d56ff12009-11-22 22:02:26 +00002646static void be_msix_disable(struct be_adapter *adapter)
2647{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002648 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002649 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002650 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302651 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002652 }
2653}
2654
/* Enable MSI-x interrupts for the adapter.
 * On success the acquired vectors are split between the NIC and (if
 * supported) RoCE, and 0 is returned. On failure the PF returns 0 so
 * probe can fall back to INTx, while a VF returns the error since INTx
 * is not supported on VFs.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* may grant fewer vectors than requested, down to MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		/* half the granted vectors go to RoCE */
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2698
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002699static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302700 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002701{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302702 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002703}
2704
/* Request one IRQ per event queue. On any failure, every IRQ acquired
 * so far is released (in reverse order) and MSI-x is disabled before
 * returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* per-queue IRQ name, e.g. "eth0-q0" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free IRQs for all EQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2728
/* Register the adapter's interrupt handler(s): MSI-x when enabled,
 * falling back to a shared INTx line on the PF. VFs cannot use INTx,
 * so an MSI-x registration failure is fatal for them.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2756
/* Release the IRQ(s) acquired by be_irq_register(); the INTx line when
 * MSI-x is off, otherwise one IRQ per event queue. Safe to call when
 * nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2779
/* Destroy all RX queues: issue the FW destroy command, drain any
 * remaining completions off the CQ, then free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* reap completions only after the RXQ is destroyed */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2795
/* ndo_stop handler: quiesce NAPI/busy-poll, disable async MCC events,
 * drain TX, destroy RX queues, remove programmed unicast MACs and
 * finally release IRQs. The ordering here is deliberate - TX must be
 * drained before queues/IRQs are torn down.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* delete the uc-list MACs (index 0 is the primary MAC, kept) */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* wait for any in-flight IRQ handler before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2845
/* Create all RX queues, configure RSS (indirection table, flags and a
 * random hash key) and post the initial set of RX buffers.
 * Returns 0 on success or a FW-command error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table by round-robin over the RSS
		 * queues (num_rx_qs - 1 of them; the default RXQ is excluded)
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
			j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* remember the key actually programmed, for ethtool reporting */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2911
/* ndo_open handler: create RX queues, register IRQs, arm all CQs/EQs,
 * enable NAPI and async MCC processing, then start the TX queues.
 * On any failure the partially initialized state is unwound via
 * be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* report the current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* learn currently configured VxLAN UDP ports from the stack */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2961
/* Enable or disable Wake-on-LAN (magic packet).
 * enable=true programs the netdev MAC as the magic-packet filter and
 * arms PCI wake; enable=false programs an all-zero MAC (clearing the
 * filter) and disarms PCI wake. A DMA buffer is needed for the FW
 * command and is always freed before returning.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		/* put the function into the PM state required for WoL */
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* zero MAC disables the magic-packet filter in FW */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3001
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003002/*
3003 * Generate a seed MAC address from the PF MAC Address using jhash.
3004 * MAC Address for VFs are assigned incrementally starting from the seed.
3005 * These addresses are programmed in the ASIC by the PF and the VF driver
3006 * queries for the MAC address during its probe.
3007 */
/* Program a MAC address for every VF, derived from a generated seed
 * MAC and incremented per VF. BEx chips add a pmac entry on the VF's
 * interface; newer chips use the set_mac command instead.
 * Returns the status of the last attempted assignment.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			/* cache the MAC we successfully programmed */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): mac[5] wraps at 0xff with no carry into
		 * mac[4] - presumably num_vfs is small enough that this
		 * never wraps; confirm against the seed generator.
		 */
		mac[5] += 1;
	}
	return status;
}
3037
/* Query the FW for the currently active MAC of each VF and cache it in
 * the VF's config. Used when VFs already exist (e.g. after a PF reload)
 * instead of re-programming fresh addresses.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3054
/* Tear down SR-IOV state: disable SR-IOV, remove each VF's programmed
 * MAC and destroy its interface. If any VF is still assigned to a VM,
 * the FW-side teardown is skipped (only the host-side bookkeeping is
 * freed) to avoid breaking the running guest.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips delete the pmac entry; newer chips clear the
		 * MAC via set_mac(NULL)
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3083
/* Destroy all queue objects owned by the adapter.
 * NOTE(review): teardown order (MCC, RX CQs, TX queues, EQs) appears
 * deliberate - preserve it when modifying.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3091
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303092static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003093{
Sathya Perla191eb752012-02-23 18:50:13 +00003094 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3095 cancel_delayed_work_sync(&adapter->work);
3096 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3097 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303098}
3099
Somnath Koturb05004a2013-12-05 12:08:16 +05303100static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303101{
3102 int i;
3103
Somnath Koturb05004a2013-12-05 12:08:16 +05303104 if (adapter->pmac_id) {
3105 for (i = 0; i < (adapter->uc_macs + 1); i++)
3106 be_cmd_pmac_del(adapter, adapter->if_handle,
3107 adapter->pmac_id[i], 0);
3108 adapter->uc_macs = 0;
3109
3110 kfree(adapter->pmac_id);
3111 adapter->pmac_id = NULL;
3112 }
3113}
3114
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload configuration: convert the interface back to
 * normal (non-tunnel) mode, clear the VxLAN UDP port in FW and reset
 * the driver's offload state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303129
/* Top-level teardown, the inverse of be_setup(): stop the worker,
 * clear VFs, re-balance FW resources, undo VxLAN offloads, remove
 * MACs, destroy the interface and all queues, and disable MSI-x.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3158
/* Create a FW interface for every VF. Capability flags come from the
 * FW profile when available (non-BE3 chips); otherwise a default set
 * (untagged/broadcast/multicast) is used. Enabled flags are always
 * restricted to that default subset of the capabilities.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* best-effort: fall back to defaults on failure */
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3190
Sathya Perla39f1d942012-05-08 19:41:24 +00003191static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003192{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003193 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003194 int vf;
3195
Sathya Perla39f1d942012-05-08 19:41:24 +00003196 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3197 GFP_KERNEL);
3198 if (!adapter->vf_cfg)
3199 return -ENOMEM;
3200
Sathya Perla11ac75e2011-12-13 00:58:50 +00003201 for_all_vfs(adapter, vf_cfg, vf) {
3202 vf_cfg->if_handle = -1;
3203 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003204 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003205 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003206}
3207
/* Bring up SR-IOV virtual functions.
 * If VFs were already enabled by a previous driver load (old_vfs != 0),
 * only re-query their if_handles and MACs; otherwise create an interface
 * and program a MAC for each VF, then enable SR-IOV via PCI.
 * On any failure all VF state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already enabled: recover existing FW state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh enable: create interfaces and assign MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			/* Let link state follow the PF's physical link */
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3282
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303283/* Converting function_mode bits on BE3 to SH mc_type enums */
3284
3285static u8 be_convert_mc_type(u32 function_mode)
3286{
Suresh Reddy66064db2014-06-23 16:41:29 +05303287 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303288 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303289 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303290 return FLEX10;
3291 else if (function_mode & VNIC_MODE)
3292 return vNIC2;
3293 else if (function_mode & UMC_ENABLED)
3294 return UMC;
3295 else
3296 return MC_NONE;
3297}
3298
/* On BE2/BE3 FW does not suggest the supported limits */
/* Fill *res with driver-computed resource limits (queues, MACs, VLANs)
 * for BE2/BE3 chips, based on chip type, PF/VF role, SR-IOV use and
 * multi-channel configuration.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS RX rings only for an RSS-capable, non-SRIOV PF; otherwise
	 * max_rss_qs stays 0 and only the single default RX ring exists
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		/* Fewer EQs per function when the device is SR-IOV capable */
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3356
Sathya Perla30128032011-11-10 19:17:57 +00003357static void be_setup_init(struct be_adapter *adapter)
3358{
3359 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003360 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003361 adapter->if_handle = -1;
3362 adapter->be3_native = false;
3363 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003364 if (be_physfn(adapter))
3365 adapter->cmd_privileges = MAX_PRIVILEGES;
3366 else
3367 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003368}
3369
/* Query the SR-IOV PF-pool resources and decide how many VFs to enable:
 * honors an already-enabled VF count, otherwise clamps the num_vfs module
 * parameter to the device limit. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI SR-IOV capability's TotalVFs */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs survive from a previous load; keep their count */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3414
/* Populate adapter->res with this function's resource limits: computed by
 * the driver on BE2/BE3, queried from FW (GET_FUNC_CONFIG) on newer chips.
 * Returns 0 or a FW command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3451
Sathya Perlad3d18312014-08-01 17:47:30 +05303452static void be_sriov_config(struct be_adapter *adapter)
3453{
3454 struct device *dev = &adapter->pdev->dev;
3455 int status;
3456
3457 status = be_get_sriov_config(adapter);
3458 if (status) {
3459 dev_err(dev, "Failed to query SR-IOV configuration\n");
3460 dev_err(dev, "SR-IOV cannot be enabled\n");
3461 return;
3462 }
3463
3464 /* When the HW is in SRIOV capable configuration, the PF-pool
3465 * resources are equally distributed across the max-number of
3466 * VFs. The user may request only a subset of the max-vfs to be
3467 * enabled. Based on num_vfs, redistribute the resources across
3468 * num_vfs so that each VF will have access to more number of
3469 * resources. This facility is not available in BE3 FW.
3470 * Also, this is done by FW in Lancer chip.
3471 */
3472 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3473 status = be_cmd_set_sriov_config(adapter,
3474 adapter->pool_res,
3475 adapter->num_vfs);
3476 if (status)
3477 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3478 }
3479}
3480
/* Read the FW/HW configuration: FW config, active profile (PF only),
 * SR-IOV setup (non-BE2 PF), resource limits; then allocate the pmac_id
 * table and clamp the configured queue count.
 * Returns 0, a FW command error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3514
Sathya Perla95046b92013-07-23 15:25:02 +05303515static int be_mac_setup(struct be_adapter *adapter)
3516{
3517 u8 mac[ETH_ALEN];
3518 int status;
3519
3520 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3521 status = be_cmd_get_perm_mac(adapter, mac);
3522 if (status)
3523 return status;
3524
3525 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3526 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3527 } else {
3528 /* Maybe the HW was reset; dev_addr must be re-programmed */
3529 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3530 }
3531
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003532 /* For BE3-R VFs, the PF programs the initial MAC address */
3533 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3534 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3535 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303536 return 0;
3537}
3538
/* Schedule the periodic worker to run in 1 second and record that it is
 * scheduled (the flag lets teardown paths know a cancel is needed).
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3544
/* Create all HW queues (EQs, TXQs, RX CQs, MCC queues) in dependency
 * order and publish the real RX/TX queue counts to the net stack.
 * On any failure only an error is logged; callers (be_setup/
 * be_update_queues) perform the actual cleanup.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3579
/* Tear down and re-create all queues (e.g. after a queue-count change):
 * close the device if running, stop the worker, drop MSI-X (unless shared
 * with RoCE), rebuild queues, then restart the worker and re-open.
 * Returns 0 or the first error from MSI-X/queue setup or be_open().
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3615
/* Main adapter bring-up: query config, enable MSI-X, create the FW
 * interface and all queues, program the MAC, apply VLAN/RX-mode/flow
 * control settings, set up VFs if requested, and start the worker.
 * On failure everything done so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags the interface is actually capable of */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program VLAN filters if any were configured before */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only push flow control settings if they differ from FW's */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3698
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with interrupts unavailable, arm each event queue and
 * schedule its NAPI context so pending completions get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3714
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303715static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003716
Sathya Perla306f1342011-08-02 19:57:45 +00003717static bool phy_flashing_required(struct be_adapter *adapter)
3718{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003719 return (adapter->phy.phy_type == TN_8022 &&
3720 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003721}
3722
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003723static bool is_comp_in_ufi(struct be_adapter *adapter,
3724 struct flash_section_info *fsec, int type)
3725{
3726 int i = 0, img_type = 0;
3727 struct flash_section_info_g2 *fsec_g2 = NULL;
3728
Sathya Perlaca34fe32012-11-06 17:48:56 +00003729 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003730 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3731
3732 for (i = 0; i < MAX_FLASH_COMP; i++) {
3733 if (fsec_g2)
3734 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3735 else
3736 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3737
3738 if (img_type == type)
3739 return true;
3740 }
3741 return false;
3742
3743}
3744
Jingoo Han4188e7d2013-08-05 18:02:02 +09003745static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303746 int header_size,
3747 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003748{
3749 struct flash_section_info *fsec = NULL;
3750 const u8 *p = fw->data;
3751
3752 p += header_size;
3753 while (p < (fw->data + fw->size)) {
3754 fsec = (struct flash_section_info *)p;
3755 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3756 return fsec;
3757 p += 32;
3758 }
3759 return NULL;
3760}
3761
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303762static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3763 u32 img_offset, u32 img_size, int hdr_size,
3764 u16 img_optype, bool *crc_match)
3765{
3766 u32 crc_offset;
3767 int status;
3768 u8 crc[4];
3769
3770 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3771 if (status)
3772 return status;
3773
3774 crc_offset = hdr_size + img_offset + img_size - 4;
3775
3776 /* Skip flashing, if crc of flashed region matches */
3777 if (!memcmp(crc, p + crc_offset, 4))
3778 *crc_match = true;
3779 else
3780 *crc_match = false;
3781
3782 return status;
3783}
3784
/* Write one image of img_size bytes to flash in 32KB chunks through the
 * DMA-able flash_cmd buffer. Intermediate chunks use the SAVE op and the
 * final chunk the FLASH op (PHY variants for PHY FW). A FW that rejects
 * PHY flashing with ILLEGAL_REQUEST is tolerated (loop breaks, returns 0).
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits the image; earlier chunks only save */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3822
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003823/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003824static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303825 const struct firmware *fw,
3826 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003827{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003828 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303829 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003830 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303831 int status, i, filehdr_size, num_comp;
3832 const struct flash_comp *pflashcomp;
3833 bool crc_match;
3834 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003835
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003836 struct flash_comp gen3_flash_types[] = {
3837 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3838 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3839 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3840 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3841 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3842 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3843 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3844 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3845 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3846 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3847 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3848 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3849 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3850 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3851 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3852 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3853 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3854 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3855 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3856 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003857 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003858
3859 struct flash_comp gen2_flash_types[] = {
3860 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3861 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3862 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3863 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3864 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3865 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3866 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3867 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3868 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3869 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3870 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3871 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3872 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3873 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3874 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3875 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003876 };
3877
Sathya Perlaca34fe32012-11-06 17:48:56 +00003878 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003879 pflashcomp = gen3_flash_types;
3880 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003881 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003882 } else {
3883 pflashcomp = gen2_flash_types;
3884 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003885 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003886 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003887
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003888 /* Get flash section info*/
3889 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3890 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303891 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003892 return -1;
3893 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003894 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003895 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003896 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003897
3898 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3899 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3900 continue;
3901
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003902 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3903 !phy_flashing_required(adapter))
3904 continue;
3905
3906 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303907 status = be_check_flash_crc(adapter, fw->data,
3908 pflashcomp[i].offset,
3909 pflashcomp[i].size,
3910 filehdr_size +
3911 img_hdrs_size,
3912 OPTYPE_REDBOOT, &crc_match);
3913 if (status) {
3914 dev_err(dev,
3915 "Could not get CRC for 0x%x region\n",
3916 pflashcomp[i].optype);
3917 continue;
3918 }
3919
3920 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003921 continue;
3922 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003923
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303924 p = fw->data + filehdr_size + pflashcomp[i].offset +
3925 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003926 if (p + pflashcomp[i].size > fw->data + fw->size)
3927 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003928
3929 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303930 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003931 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303932 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003933 pflashcomp[i].img_type);
3934 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003935 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003936 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003937 return 0;
3938}
3939
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303940static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3941{
3942 u32 img_type = le32_to_cpu(fsec_entry.type);
3943 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3944
3945 if (img_optype != 0xFFFF)
3946 return img_optype;
3947
3948 switch (img_type) {
3949 case IMAGE_FIRMWARE_iSCSI:
3950 img_optype = OPTYPE_ISCSI_ACTIVE;
3951 break;
3952 case IMAGE_BOOT_CODE:
3953 img_optype = OPTYPE_REDBOOT;
3954 break;
3955 case IMAGE_OPTION_ROM_ISCSI:
3956 img_optype = OPTYPE_BIOS;
3957 break;
3958 case IMAGE_OPTION_ROM_PXE:
3959 img_optype = OPTYPE_PXE_BIOS;
3960 break;
3961 case IMAGE_OPTION_ROM_FCoE:
3962 img_optype = OPTYPE_FCOE_BIOS;
3963 break;
3964 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3965 img_optype = OPTYPE_ISCSI_BACKUP;
3966 break;
3967 case IMAGE_NCSI:
3968 img_optype = OPTYPE_NCSI_FW;
3969 break;
3970 case IMAGE_FLASHISM_JUMPVECTOR:
3971 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3972 break;
3973 case IMAGE_FIRMWARE_PHY:
3974 img_optype = OPTYPE_SH_PHY_FW;
3975 break;
3976 case IMAGE_REDBOOT_DIR:
3977 img_optype = OPTYPE_REDBOOT_DIR;
3978 break;
3979 case IMAGE_REDBOOT_CONFIG:
3980 img_optype = OPTYPE_REDBOOT_CONFIG;
3981 break;
3982 case IMAGE_UFI_DIR:
3983 img_optype = OPTYPE_UFI_DIR;
3984 break;
3985 default:
3986 break;
3987 }
3988
3989 return img_optype;
3990}
3991
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003992static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303993 const struct firmware *fw,
3994 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003995{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003996 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303997 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003998 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303999 u32 img_offset, img_size, img_type;
4000 int status, i, filehdr_size;
4001 bool crc_match, old_fw_img;
4002 u16 img_optype;
4003 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004004
4005 filehdr_size = sizeof(struct flash_file_hdr_g3);
4006 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4007 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304008 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304009 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004010 }
4011
4012 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4013 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4014 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304015 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4016 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4017 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004018
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304019 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004020 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304021 /* Don't bother verifying CRC if an old FW image is being
4022 * flashed
4023 */
4024 if (old_fw_img)
4025 goto flash;
4026
4027 status = be_check_flash_crc(adapter, fw->data, img_offset,
4028 img_size, filehdr_size +
4029 img_hdrs_size, img_optype,
4030 &crc_match);
4031 /* The current FW image on the card does not recognize the new
4032 * FLASH op_type. The FW download is partially complete.
4033 * Reboot the server now to enable FW image to recognize the
4034 * new FLASH op_type. To complete the remaining process,
4035 * download the same FW again after the reboot.
4036 */
Kalesh AP4c600052014-05-30 19:06:26 +05304037 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4038 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304039 dev_err(dev, "Flash incomplete. Reset the server\n");
4040 dev_err(dev, "Download FW image again after reset\n");
4041 return -EAGAIN;
4042 } else if (status) {
4043 dev_err(dev, "Could not get CRC for 0x%x region\n",
4044 img_optype);
4045 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004046 }
4047
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304048 if (crc_match)
4049 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004050
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304051flash:
4052 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004053 if (p + img_size > fw->data + fw->size)
4054 return -1;
4055
4056 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304057 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4058 * UFI_DIR region
4059 */
Kalesh AP4c600052014-05-30 19:06:26 +05304060 if (old_fw_img &&
4061 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4062 (img_optype == OPTYPE_UFI_DIR &&
4063 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304064 continue;
4065 } else if (status) {
4066 dev_err(dev, "Flashing section type 0x%x failed\n",
4067 img_type);
4068 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004069 }
4070 }
4071 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004072}
4073
/* Download a FW image to a Lancer adapter.
 * The image is streamed to the FW "/prg" object in 32KB chunks via
 * WRITE_OBJECT cmds and then committed with a zero-length write.  Based
 * on the change_status FW returns, the adapter is either reset here to
 * activate the new image, or the user is asked to reboot the server.
 * Returns 0 on success or a -ve errno.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW expects the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the cmd header followed by one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Write the image chunk by chunk; FW reports how much it consumed
	 * in data_written, which may be less than chunk_size.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	/* Activate the new image: either by resetting the adapter here or,
	 * failing that, by asking the user to reboot the server.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4158
Sathya Perlaca34fe32012-11-06 17:48:56 +00004159#define UFI_TYPE2 2
4160#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004161#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004162#define UFI_TYPE4 4
4163static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004164 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004165{
Kalesh APddf11692014-07-17 16:20:28 +05304166 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004167 goto be_get_ufi_exit;
4168
Sathya Perlaca34fe32012-11-06 17:48:56 +00004169 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4170 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004171 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4172 if (fhdr->asic_type_rev == 0x10)
4173 return UFI_TYPE3R;
4174 else
4175 return UFI_TYPE3;
4176 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004177 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004178
4179be_get_ufi_exit:
4180 dev_err(&adapter->pdev->dev,
4181 "UFI and Interface are not compatible for flashing\n");
4182 return -1;
4183}
4184
/* Download a UFI image to a BE2/BE3/Skyhawk adapter.
 * The UFI type is derived from the file header and must match the chip;
 * per-section flashing is delegated to be_flash_BEx()/be_flash_skyhawk().
 * Returns 0 on success or a -ve error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for each WRITE_FLASHROM cmd issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Flash only the image(s) marked with imageid == 1 */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* TYPE2 (BE2) images carry no image_hdr array; flash them directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4253
4254int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4255{
4256 const struct firmware *fw;
4257 int status;
4258
4259 if (!netif_running(adapter->netdev)) {
4260 dev_err(&adapter->pdev->dev,
4261 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304262 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004263 }
4264
4265 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4266 if (status)
4267 goto fw_exit;
4268
4269 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4270
4271 if (lancer_chip(adapter))
4272 status = lancer_fw_download(adapter, fw);
4273 else
4274 status = be_fw_download(adapter, fw);
4275
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004276 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304277 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004278
Ajit Khaparde84517482009-09-04 03:12:16 +00004279fw_exit:
4280 release_firmware(fw);
4281 return status;
4282}
4283
Sathya Perla748b5392014-05-09 13:29:13 +05304284static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004285{
4286 struct be_adapter *adapter = netdev_priv(dev);
4287 struct nlattr *attr, *br_spec;
4288 int rem;
4289 int status = 0;
4290 u16 mode = 0;
4291
4292 if (!sriov_enabled(adapter))
4293 return -EOPNOTSUPP;
4294
4295 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4296
4297 nla_for_each_nested(attr, br_spec, rem) {
4298 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4299 continue;
4300
4301 mode = nla_get_u16(attr);
4302 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4303 return -EINVAL;
4304
4305 status = be_cmd_set_hsw_config(adapter, 0, 0,
4306 adapter->if_handle,
4307 mode == BRIDGE_MODE_VEPA ?
4308 PORT_FWD_TYPE_VEPA :
4309 PORT_FWD_TYPE_VEB);
4310 if (status)
4311 goto err;
4312
4313 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4314 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4315
4316 return status;
4317 }
4318err:
4319 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4320 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4321
4322 return status;
4323}
4324
4325static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304326 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004327{
4328 struct be_adapter *adapter = netdev_priv(dev);
4329 int status = 0;
4330 u8 hsw_mode;
4331
4332 if (!sriov_enabled(adapter))
4333 return 0;
4334
4335 /* BE and Lancer chips support VEB mode only */
4336 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4337 hsw_mode = PORT_FWD_TYPE_VEB;
4338 } else {
4339 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4340 adapter->if_handle, &hsw_mode);
4341 if (status)
4342 return 0;
4343 }
4344
4345 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4346 hsw_mode == PORT_FWD_TYPE_VEPA ?
4347 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4348}
4349
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304350#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304351static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4352 __be16 port)
4353{
4354 struct be_adapter *adapter = netdev_priv(netdev);
4355 struct device *dev = &adapter->pdev->dev;
4356 int status;
4357
4358 if (lancer_chip(adapter) || BEx_chip(adapter))
4359 return;
4360
4361 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4362 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4363 be16_to_cpu(port));
4364 dev_info(dev,
4365 "Only one UDP port supported for VxLAN offloads\n");
4366 return;
4367 }
4368
4369 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4370 OP_CONVERT_NORMAL_TO_TUNNEL);
4371 if (status) {
4372 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4373 goto err;
4374 }
4375
4376 status = be_cmd_set_vxlan_port(adapter, port);
4377 if (status) {
4378 dev_warn(dev, "Failed to add VxLAN port\n");
4379 goto err;
4380 }
4381 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4382 adapter->vxlan_port = port;
4383
4384 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4385 be16_to_cpu(port));
4386 return;
4387err:
4388 be_disable_vxlan_offloads(adapter);
4389 return;
4390}
4391
4392static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4393 __be16 port)
4394{
4395 struct be_adapter *adapter = netdev_priv(netdev);
4396
4397 if (lancer_chip(adapter) || BEx_chip(adapter))
4398 return;
4399
4400 if (adapter->vxlan_port != port)
4401 return;
4402
4403 be_disable_vxlan_offloads(adapter);
4404
4405 dev_info(&adapter->pdev->dev,
4406 "Disabled VxLAN offloads for UDP port %d\n",
4407 be16_to_cpu(port));
4408}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304409#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304410
/* netdev callbacks implemented by this driver */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4440
/* Initialize netdev feature flags and attach the driver's netdev and
 * ethtool ops.  Called once during probe, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk supports VxLAN offloads: advertise csum/TSO for
	 * encapsulated traffic as well.
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* features = user-toggleable hw_features + always-on VLAN offloads;
	 * this must follow all hw_features updates above.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Leave room for the Ethernet header within the 64KB GSO limit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4473
4474static void be_unmap_pci_bars(struct be_adapter *adapter)
4475{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004476 if (adapter->csr)
4477 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004478 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004479 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004480}
4481
/* BAR number holding the doorbell region: BAR 0 on Lancer chips and on
 * VFs, BAR 4 on BE PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4489
4490static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004491{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004492 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004493 adapter->roce_db.size = 4096;
4494 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4495 db_bar(adapter));
4496 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4497 db_bar(adapter));
4498 }
Parav Pandit045508a2012-03-26 14:27:13 +00004499 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004500}
4501
/* iomap the BARs the driver needs: the CSR BAR (BE2/BE3 PFs only) and
 * the doorbell BAR.  Also records the RoCE doorbell region on Skyhawk.
 * Returns 0 on success or -ENOMEM; any mapping done before a failure
 * is undone.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* Only BE2/BE3 PFs expose/need the CSR region in BAR 2 */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4525
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004526static void be_ctrl_cleanup(struct be_adapter *adapter)
4527{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004528 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004529
4530 be_unmap_pci_bars(adapter);
4531
4532 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004533 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4534 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004535
Sathya Perla5b8821b2011-08-02 19:57:44 +00004536 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004537 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004538 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4539 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004540}
4541
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004542static int be_ctrl_init(struct be_adapter *adapter)
4543{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004544 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4545 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004546 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004547 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004548 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004549
Sathya Perlace66f782012-11-06 17:48:58 +00004550 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4551 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4552 SLI_INTF_FAMILY_SHIFT;
4553 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4554
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004555 status = be_map_pci_bars(adapter);
4556 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004557 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004558
4559 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004560 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4561 mbox_mem_alloc->size,
4562 &mbox_mem_alloc->dma,
4563 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004564 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004565 status = -ENOMEM;
4566 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004567 }
4568 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4569 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4570 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4571 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004572
Sathya Perla5b8821b2011-08-02 19:57:44 +00004573 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa82013-08-26 22:45:23 -07004574 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4575 rx_filter->size, &rx_filter->dma,
4576 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304577 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004578 status = -ENOMEM;
4579 goto free_mbox;
4580 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004581
Ivan Vecera29849612010-12-14 05:43:19 +00004582 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004583 spin_lock_init(&adapter->mcc_lock);
4584 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004585
Suresh Reddy5eeff632014-01-06 13:02:24 +05304586 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004587 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004588 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004589
4590free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004591 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4592 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004593
4594unmap_pci_bars:
4595 be_unmap_pci_bars(adapter);
4596
4597done:
4598 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004599}
4600
4601static void be_stats_cleanup(struct be_adapter *adapter)
4602{
Sathya Perla3abcded2010-10-03 22:12:27 -07004603 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004604
4605 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004606 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4607 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004608}
4609
4610static int be_stats_init(struct be_adapter *adapter)
4611{
Sathya Perla3abcded2010-10-03 22:12:27 -07004612 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004613
Sathya Perlaca34fe32012-11-06 17:48:56 +00004614 if (lancer_chip(adapter))
4615 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4616 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004617 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004618 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004619 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004620 else
4621 /* ALL non-BE ASICs */
4622 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004623
Joe Perchesede23fa82013-08-26 22:45:23 -07004624 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4625 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304626 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304627 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004628 return 0;
4629}
4630
/* PCI remove callback: tear the adapter down in roughly the reverse
 * order of probe.  Ordering matters: the recovery worker is cancelled
 * and the netdev unregistered before resources are freed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4661
/* Query one-time configuration from the firmware after it is ready:
 * controller attributes, die-temperature polling period, FW log level
 * (BEx chips only) and the default number of RSS queues.
 * Returns 0 on success or a negative command status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		/* Enable HW-level messages only when FW logging is at or
		 * below its default verbosity
		 */
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4682
/* Recover a Lancer function after a firmware/hardware error: wait for the
 * chip to report ready, tear the function down, clear the recorded error
 * state and bring it back up.  -EAGAIN from any step means firmware is
 * still provisioning resources and the caller should retry later.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Forget the previous error so be_setup()/be_open() can proceed */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4719
/* Periodic (1s) worker that polls for adapter errors and, on Lancer chips,
 * attempts automatic function recovery.  Reschedules itself unless a
 * recovery attempt failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the device
		 * while it is being recovered
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4746
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues statistics and die-temperature queries,
 * replenishes starved RX queues and updates EQ delay (interrupt
 * moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq ticks, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4789
Sathya Perla257a3fe2013-06-14 15:54:51 +05304790/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004791static bool be_reset_required(struct be_adapter *adapter)
4792{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304793 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004794}
4795
Sathya Perlad3791422012-09-28 04:39:44 +00004796static char *mc_name(struct be_adapter *adapter)
4797{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304798 char *str = ""; /* default */
4799
4800 switch (adapter->mc_type) {
4801 case UMC:
4802 str = "UMC";
4803 break;
4804 case FLEX10:
4805 str = "FLEX10";
4806 break;
4807 case vNIC1:
4808 str = "vNIC-1";
4809 break;
4810 case nPAR:
4811 str = "nPAR";
4812 break;
4813 case UFP:
4814 str = "UFP";
4815 break;
4816 case vNIC2:
4817 str = "vNIC-2";
4818 break;
4819 default:
4820 str = "";
4821 }
4822
4823 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004824}
4825
/* Tag for log messages: physical vs virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4830
/* PCI probe callback: enable the device, allocate the netdev, set up DMA
 * masks, initialize the control path, synchronize with firmware, optionally
 * FLR the function, then bring up the data path and register the netdev.
 * Error paths unwind in strict reverse order via the goto ladder at the end.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled best-effort on the PF only; failure is not fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to on in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	/* Best-effort query; port_name is only used in the banner below */
	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4954
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce the
 * adapter (interrupts, recovery worker, data path) and put the device
 * into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4979
/* Legacy PM resume callback: re-enable the device, wait for firmware,
 * rebuild the adapter state torn down by be_suspend() and restart the
 * recovery worker.  Returns 0 on success or a negative errno.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are ignored
	 * here — presumably intentional best-effort resume; confirm.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5021
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset stops all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5042
/* EEH/AER error-detected callback: quiesce the adapter on the first error
 * report and tell the PCI core whether a slot reset may recover the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5081
/* EEH/AER slot-reset callback: re-enable the device after the slot reset,
 * wait for firmware to become ready and clear recorded error state.
 * Returns RECOVERED so the core proceeds to be_eeh_resume(), or
 * DISCONNECT if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5108
/* EEH/AER resume callback: rebuild the adapter after a successful slot
 * reset (FLR, interrupt enable, firmware init, be_setup/be_open) and
 * restart the recovery worker.  On any failure the device is left
 * detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5151
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5157
/* PCI driver registration: probe/remove, legacy PM hooks, shutdown and
 * error-recovery handlers for all supported BE/Lancer/Skyhawk devices.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5168
5169static int __init be_init_module(void)
5170{
Joe Perches8e95a202009-12-03 07:58:21 +00005171 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5172 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005173 printk(KERN_WARNING DRV_NAME
5174 " : Module param rx_frag_size must be 2048/4096/8192."
5175 " Using 2048\n");
5176 rx_frag_size = 2048;
5177 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005178
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005179 return pci_register_driver(&be_driver);
5180}
5181module_init(be_init_module);
5182
/* Module exit point: unregister the PCI driver, which triggers
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);