blob: b06e54084d757cf957b7ebcff2ad15ad179d643f [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Bit-position -> block-name map for the low 32 bits of the UE
 * (Unrecoverable Error) status CSR: index i names the HW block whose
 * error is signalled by bit i.
 * NOTE(review): the trailing spaces in many entries are preserved
 * byte-for-byte — they are part of the strings as printed.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* UE Status High CSR */
/* Bit-position -> block-name map for the high 32 bits of the UE status
 * CSR; the final "Unknown" entry catches bit positions beyond the named
 * blocks.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
/* Ring the RX-queue doorbell: tell HW that 'posted' new receive buffers
 * are available on ring 'qid'.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Make sure buffer-descriptor writes reach memory before the
	 * doorbell write is issued.
	 */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
196
/* Ring the TX-queue doorbell: tell HW that 'posted' new wrbs were queued
 * on the TX ring of @txo. Each TX object carries its own doorbell offset.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Order the wrb writes before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
207
/* Notify HW of 'num_popped' consumed entries on event queue 'qid';
 * optionally re-arm the EQ and/or clear the interrupt.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch the doorbell once an EEH error has been detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* Mark this as an event-queue (not completion-queue) doorbell */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Notify HW of 'num_popped' consumed entries on completion queue 'qid';
 * optionally re-arm the CQ. Non-static: also used by other driver files.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch the doorbell once an EEH error has been detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* ndo_set_mac_address handler.
 * Programs the new MAC into HW via the PMAC_ADD FW cmd, deletes the old
 * PMAC entry on success, then confirms with the FW that the new MAC is
 * actually active before updating netdev->dev_addr. The confirm step is
 * needed because on VFs the add/del cmds can fail "legitimately" when the
 * PF has pre-provisioned the MAC (see comments below).
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0 (BE2) FW stats-cmd response into the driver's unified
 * drv_stats structure. The response is converted from LE to CPU order
 * in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; the
	 * unified counter is their sum.
	 */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* Jabber events are reported per physical port in v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) FW stats-cmd response into the driver's unified
 * drv_stats structure, after in-place LE-to-CPU conversion.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* Unlike v0, v1 reports a single pre-combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 (Skyhawk-era) FW stats-cmd response into the driver's
 * unified drv_stats structure, after in-place LE-to-CPU conversion.
 * Also pulls the RoCE counters when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		/* RoCE counters only exist in the v2 response */
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer pport stats-cmd response into the driver's unified
 * drv_stats structure, after in-place LE-to-CPU conversion. Fields with
 * a _lo suffix are the low 32 bits of 64-bit HW counters.
 * NOTE(review): both rx_input_fifo_overflow_drop and
 * rxpp_fifo_overflow_drop are fed from the same pport rx_fifo_overflow
 * counter — looks intentional (Lancer has only one such counter), but
 * worth confirming against the Lancer stats spec.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Like BE2-v0, address- and vlan-filtered drops are summed */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the FW-derived drv_stats error counters into @stats.
 * The per-queue u64 counters are read under u64_stats seqcount retry
 * loops so a concurrent datapath update on 32-bit hosts can't be torn.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read from a consistent snapshot */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Fill the per-request header WRB: LSO/checksum/VLAN offload bits, the
 * number of WRBs that follow and the total byte length of the request.
 * @skip_hw_vlan: when true, program evt=1/compl=0 so the f/w skips HW
 * VLAN insertion (workaround; see be_insert_vlan_in_pkt()).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not set for Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* for tunneled pkts the L4 proto comes from the inner hdr;
		 * ipcs requests inner IP csum offload
		 */
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* Map @skb into TX WRBs on @txq: one hdr WRB, one WRB for the linear head
 * (if any), one per page fragment, and a zero-length dummy WRB when the
 * caller requests it. Returns the number of data bytes mapped; on a DMA
 * mapping error all mappings done so far are undone, the queue head is
 * restored and 0 is returned.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the hdr WRB now; it is filled in last, once the total
	 * byte count is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* remember where data WRBs start, for unwinding on error */
	map_head = txq->head;

	/* linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* pad with a zero-length WRB to keep the WRB count even (BE chips) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind to the first data WRB and unmap everything mapped
	 * so far; only the first WRB can be a dma_map_single() mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert the VLAN tag (and the outer qnq tag, if configured) into the pkt
 * data itself, instead of letting the HW do it. May re-allocate the skb;
 * returns the (possibly new) skb, or NULL if allocation failed.
 * @skip_hw_vlan (optional) is set to true when the f/w must be told to
 * skip its own VLAN insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* no stack-supplied tag: fall back to the port's pvid */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now inline in the pkt; clear the out-of-band tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply BEx/Lancer TX workarounds: trim padded short pkts, and insert
 * VLAN tags in s/w where HW tagging would corrupt the pkt or stall the
 * chip. May re-allocate or drop the skb; returns the (possibly new) skb,
 * or NULL if it was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * So trim such pkts back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: applies HW workarounds, maps the skb into TX
 * WRBs and rings the TX doorbell. Always consumes the skb and returns
 * NETDEV_TX_OK; failures are counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds dropped/consumed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the pkt */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * When the vid table is programmed successfully again, any previously
 * enabled VLAN-promiscuous mode is turned back off.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
Patrick McHardy80d5c362013-04-19 02:04:28 +00001148static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001151 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001153 /* Packets with VID 0 are always received by Lancer by default */
1154 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301155 return status;
1156
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301157 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301158 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001159
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301160 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301161 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001162
Somnath Kotura6b74e02014-01-21 15:50:55 +05301163 status = be_vid_config(adapter);
1164 if (status) {
1165 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301166 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301167 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301168
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001169 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170}
1171
Patrick McHardy80d5c362013-04-19 02:04:28 +00001172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
1175
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301178 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001179
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301180 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301181 adapter->vlans_added--;
1182
1183 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184}
1185
Somnath kotur7ad09452014-03-03 14:24:43 +05301186static void be_clear_promisc(struct be_adapter *adapter)
1187{
1188 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301189 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301190
1191 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1192}
1193
/* ndo_set_rx_mode handler: program the f/w RX filters (promiscuous,
 * unicast MAC list, multicast) to match the netdev's current flags and
 * address lists, falling back to (mcast) promiscuous mode when the HW
 * limits are exceeded or a filter command fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the vlan table that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* re-sync the unicast MAC list with the f/w: delete all secondary
	 * MACs, then re-add from the netdev's current uc list
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many uc addrs for the HW: full promiscuous instead */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* mcast filter programmed OK: leave mcast-promisc, if set */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1260
/* ndo_set_vf_mac handler: program a new MAC address for VF @vf.
 *
 * Returns 0 on success (or if @mac already matches the cached active MAC),
 * -EPERM when SR-IOV is not enabled, -EINVAL for an invalid MAC or
 * out-of-range VF index, or a translated firmware error status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BE2/BE3: MAC filters are managed as pmac entries; remove
		 * the old entry before adding the new one. vf + 1 is the
		 * firmware domain id for this VF.
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		/* Lancer/Skyhawk: a single command replaces the MAC */
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC only after the firmware accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1300
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001301static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301302 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001303{
1304 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001305 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001306
Sathya Perla11ac75e2011-12-13 00:58:50 +00001307 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001308 return -EPERM;
1309
Sathya Perla11ac75e2011-12-13 00:58:50 +00001310 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001311 return -EINVAL;
1312
1313 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001314 vi->max_tx_rate = vf_cfg->tx_rate;
1315 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001316 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1317 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001318 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301319 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001320
1321 return 0;
1322}
1323
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF @vf
 * via the switch (hsw) config table. vlan == 0 && qos == 0 disables it.
 *
 * Returns 0 on success, -EPERM when SR-IOV is not enabled, -EINVAL for a
 * bad VF index / VID / priority, or a translated firmware error status.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Fold the 3-bit priority into the tag; push to firmware
		 * only if it differs from the cached value.
		 */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* Cache the tag so be_get_vf_config() can report it back */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1358
/* ndo_set_vf_rate handler: program a max TX rate (QOS) for VF @vf.
 *
 * min_tx_rate is not supported and must be 0. max_tx_rate == 0 clears the
 * rate limit; a non-zero rate must be 100..link_speed Mbps and, on Skyhawk,
 * a multiple of 1% of the link speed.
 *
 * Returns 0 on success or a negative errno / translated firmware status.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 means "no limit": skip the link-speed based checks */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	/* vf + 1 is the firmware domain id for this VF */
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the rate so be_get_vf_config() can report it back */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301420static int be_set_vf_link_state(struct net_device *netdev, int vf,
1421 int link_state)
1422{
1423 struct be_adapter *adapter = netdev_priv(netdev);
1424 int status;
1425
1426 if (!sriov_enabled(adapter))
1427 return -EPERM;
1428
1429 if (vf >= adapter->num_vfs)
1430 return -EINVAL;
1431
1432 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301433 if (status) {
1434 dev_err(&adapter->pdev->dev,
1435 "Link state change on VF %d failed: %#x\n", vf, status);
1436 return be_cmd_status(status);
1437 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301438
Kalesh APabccf232014-07-17 16:20:24 +05301439 adapter->vf_cfg[vf].plink_tracking = link_state;
1440
1441 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301442}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001443
Sathya Perla2632baf2013-10-01 16:00:00 +05301444static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1445 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446{
Sathya Perla2632baf2013-10-01 16:00:00 +05301447 aic->rx_pkts_prev = rx_pkts;
1448 aic->tx_reqs_prev = tx_pkts;
1449 aic->jiffies = now;
1450}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001451
/* Adaptive interrupt coalescing: recompute the event-queue delay (EQD) for
 * every EQ from the rx/tx packets-per-second observed since the last run,
 * clamp it to the per-EQ min/max, and push all changed values to the
 * firmware in one be_cmd_modify_eqd() call.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* Adaptive mode off: apply the static (ethtool) eqd */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the 64-bit counters under the stats seqcount so a
		 * consistent value is seen even on 32-bit hosts.
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* pkts-per-second since last sample -> candidate eqd */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* low traffic: no delay at all */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a firmware update only when the value changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1518
Sathya Perla3abcded2010-10-03 22:12:27 -07001519static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301520 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001521{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001522 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001523
Sathya Perlaab1594e2011-07-25 19:10:15 +00001524 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001525 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001526 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001527 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001528 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001529 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001530 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001531 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001533}
1534
Sathya Perla2e588f82011-03-11 02:49:26 +00001535static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001536{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001537 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301538 * Also ignore ipcksm for ipv6 pkts
1539 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001540 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301541 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001542}
1543
/* Consume the page_info entry at the RX queue tail for the next received
 * fragment. The fragment marked last_frag owns the page's DMA mapping and
 * gets a full dma_unmap_page(); earlier fragments only need a CPU sync of
 * their rx_frag_size slice. Advances the queue tail and drops the used
 * count before returning the entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1569
1570/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001571static void be_rx_compl_discard(struct be_rx_obj *rxo,
1572 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001575 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001576
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001577 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301578 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001579 put_page(page_info->page);
1580 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 }
1582}
1583
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny frames (<= BE_HDR_LEN) are copied entirely into the skb linear
 * area. Larger frames get the Ethernet header copied linearly and the
 * rest attached as page fragments; consecutive HW fragments that share
 * a physical page are coalesced into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		/* Payload beyond the header stays in the page as frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1658
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted page fragments, set checksum /
 * hash / vlan metadata and hand it to the stack via netif_receive_skb().
 * On skb allocation failure the frame is counted and its pages reclaimed.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum verdict only when RXCSUM is enabled and
	 * the completion says the csum is reliable (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1694
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted page fragments to an skb from the per-NAPI GRO cache
 * (coalescing frags that share a physical page), set metadata and pass
 * it to napi_gro_frags(). On allocation failure the frame is dropped
 * and its pages reclaimed.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1752
/* Decode a v1 RX completion descriptor (used when adapter->be3_native is
 * set — see be_rx_compl_get()) into the chip-independent be_rx_compl_info
 * form. The caller has already converted @compl to CPU byte order.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are meaningful only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784
/* Decode a v0 (legacy) RX completion descriptor into the chip-independent
 * be_rx_compl_info form. The caller has already converted @compl to CPU
 * byte order. Unlike v1 this layout carries an ip_frag bit instead of a
 * tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are meaningful only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1816
/* Fetch the next valid RX completion from this RX object's CQ, parse it
 * into rxo->rxcp and return it; returns NULL when the CQ has no new entry.
 * The entry's valid bit is cleared and the CQ tail advanced before
 * returning, so each completion is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit was seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* Ignore the HW L4-csum verdict for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag unless it was explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1861
Eric Dumazet1829b082011-03-01 05:48:12 +00001862static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001865
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001867 gfp |= __GFP_COMP;
1868 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869}
1870
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 * Stops when MAX_RX_POST buffers have been posted, the RXQ is full, or
 * page/DMA-map allocation fails; on total starvation flags the rxo so
 * be_worker retries the posting later.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* A non-NULL page_info->page means the slot is still owned by HW */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it once for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next fragment out of the current page;
			 * each fragment holds its own page reference */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* Last frag carries the page's DMA handle so the
			 * whole mapping is released when it is consumed */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1949
/* Fetch the next valid TX completion from tx_cq, converting it to CPU
 * endianness in place. Returns NULL when no completion is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't read the entry body until the valid bit
	 * has been observed set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the valid bit so this entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1965
/* Unmap and free one completed TX skb whose WRBs run from the current
 * txq tail up to and including last_index. Returns the number of WRBs
 * consumed (header WRB included) so the caller can credit the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is tracked at the slot of its header WRB (current tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data WRB may carry the linear (header)
		 * part of the skb; unmap it just once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* Safe from any context (hard-irq included) */
	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1997
/* Return the number of events in the event queue.
 * Each consumed EQ entry is zeroed so it is seen only once; the queue
 * tail is advanced past all pending entries.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier between the evt check and clearing/consuming
		 * the DMA'ed entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2017
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002018/* Leaves the EQ is disarmed state */
2019static void be_eq_clean(struct be_eq_obj *eqo)
2020{
2021 int num = events_get(eqo);
2022
2023 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2024}
2025
/* Drain the RX CQ and free all RX buffers still posted to HW.
 * Must be called only after the RXQ has been told to stop (flush).
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* Re-arm the CQ to nudge HW into flushing any
			 * partially coalesced entries */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* Zero num_rcvd identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2074
/* Reap all outstanding TX completions on every TXQ, then forcibly free
 * any posted skbs whose completions will never arrive. Used on teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the 10ms
				 * silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's WRB footprint to find its
			 * last WRB index, then free it as if completed */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2134
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002135static void be_evt_queues_destroy(struct be_adapter *adapter)
2136{
2137 struct be_eq_obj *eqo;
2138 int i;
2139
2140 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002141 if (eqo->q.created) {
2142 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002143 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302144 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302145 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002146 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002147 be_queue_free(adapter, &eqo->q);
2148 }
2149}
2150
/* Allocate and create the event queues (one per interrupt vector, capped
 * by the configured queue count) and register a NAPI context for each.
 * Returns 0 on success or a negative errno-style status from the first
 * failing step; partially created EQs are cleaned up by the caller via
 * be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		/* Per-EQ adaptive interrupt coalescing state */
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2184
Sathya Perla5fb379e2009-06-18 00:02:59 +00002185static void be_mcc_queues_destroy(struct be_adapter *adapter)
2186{
2187 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002188
Sathya Perla8788fdc2009-07-27 22:52:03 +00002189 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002190 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002191 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002192 be_queue_free(adapter, q);
2193
Sathya Perla8788fdc2009-07-27 22:52:03 +00002194 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002195 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002196 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002197 be_queue_free(adapter, q);
2198}
2199
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and WRB queue; on any failure the
 * goto chain unwinds whatever was already created and returns -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2232
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233static void be_tx_queues_destroy(struct be_adapter *adapter)
2234{
2235 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002236 struct be_tx_obj *txo;
2237 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238
Sathya Perla3c8def92011-06-12 20:01:58 +00002239 for_all_tx_queues(adapter, txo, i) {
2240 q = &txo->q;
2241 if (q->created)
2242 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2243 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244
Sathya Perla3c8def92011-06-12 20:01:58 +00002245 q = &txo->cq;
2246 if (q->created)
2247 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2248 be_queue_free(adapter, q);
2249 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250}
2251
/* Create the TX completion queues and TX WRB queues, one pair per TXQ.
 * Returns 0 on success or the first failing status; the caller is
 * responsible for tearing down any partially created queues.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	/* No more TXQs than EQs, and no more than HW supports */
	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2292
2293static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002294{
2295 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002296 struct be_rx_obj *rxo;
2297 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002298
Sathya Perla3abcded2010-10-03 22:12:27 -07002299 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002300 q = &rxo->cq;
2301 if (q->created)
2302 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2303 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002304 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002305}
2306
/* Decide the number of RX queues and create a completion queue for each.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* Size of the compound page each RX buffer is carved from */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2343
/* Legacy INTx interrupt handler: counts pending events, schedules NAPI
 * and acks/clears the interrupt. Tracks spurious interrupts so the
 * kernel does not disable the line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events without re-arming the EQ; NAPI will
	 * re-arm when polling completes */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2375
/* MSI-X interrupt handler: ack the EQ without re-arming it (NAPI will
 * re-arm after polling) and hand processing off to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2384
Sathya Perla2e588f82011-03-11 02:49:26 +00002385static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002386{
Somnath Koture38b1702013-05-29 22:55:56 +00002387 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002388}
2389
/* Poll up to 'budget' RX completions on rxo and deliver the packets to
 * the stack (via GRO when eligible and not busy-polling). Returns the
 * number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack processed completions and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2445
/* Reap up to 'budget' TX completions on txo, credit the TXQ and wake the
 * corresponding netdev subqueue if it was flow-controlled. Returns true
 * when the CQ was fully drained within the budget (i.e. TX work is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		/* Ack the reaped completions and re-arm the CQ */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002479
/* NAPI poll handler for one EQ: services all TXQs and RXQs mapped to this
 * EQ, processes MCC completions on the MCC EQ, and re-arms the EQ only
 * when all work fit within the budget. Returns the work done (capped at
 * 'budget' to stay in polling mode when more work remains).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	/* Count (and implicitly clear) the events that triggered this poll */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX work forces max_work = budget, keeping
		 * NAPI in polling mode */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy_poll owns the RX rings right now; stay in polling */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* All done: ack events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2524
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) receive path for the socket busy-poll hook.
 * Tries each RXQ on this EQ with a small budget of 4 and stops at the
 * first queue that produced completions.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, polled = 0;

	/* NAPI currently owns this EQ; tell the caller to back off */
	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		polled = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (polled)
			break;
	}

	be_unlock_busy_poll(eqo);
	return polled;
}
#endif
2546
/* Probe the adapter for unrecoverable hardware/firmware errors.
 * Lancer chips report via SLIPORT registers; other chips via the PCI-config
 * UE (unrecoverable-error) status registers. On a detected error the carrier
 * is turned off; adapter->hw_error is set only where the code below
 * explicitly does so.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Mask off bits the FW says to ignore */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a line for each set bit, shifting through the
			 * status words
			 */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2622
Sathya Perla8d56ff12009-11-22 22:02:26 +00002623static void be_msix_disable(struct be_adapter *adapter)
2624{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002625 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002626 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002627 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302628 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002629 }
2630}
2631
/* Enable MSI-X for the adapter.
 * Requests up to the computed number of vectors (at least MIN_MSIX_VECTORS),
 * splits the granted vectors between RoCE and NIC when RoCE is supported,
 * and records the counts in the adapter.
 * Returns 0 on success; for a PF, MSI-X failure is non-fatal (INTx fallback)
 * so 0 is also returned, while for a VF the pci_enable_msix_range() error
 * is propagated since VFs cannot use INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere in [MIN_MSIX_VECTORS, num_vec] */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give half of the granted vectors to RoCE when applicable */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2675
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002676static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302677 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002678{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302679 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002680}
2681
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On any failure, frees the IRQs already requested (walking back from the
 * failing index), disables MSI-X, and returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: i currently points at the EQ that failed; free the
	 * IRQs of all EQs before it.
	 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2705
/* Register interrupt handlers for the adapter.
 * Prefers MSI-X; a PF falls back to a shared INTx IRQ (serviced via EQ0)
 * when MSI-X registration fails, while a VF returns the error since INTx
 * is not supported for VFs. Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2733
2734static void be_irq_unregister(struct be_adapter *adapter)
2735{
2736 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002737 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002738 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002739
2740 if (!adapter->isr_registered)
2741 return;
2742
2743 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002744 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002745 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002746 goto done;
2747 }
2748
2749 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002750 for_all_evt_queues(adapter, eqo, i)
2751 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002752
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753done:
2754 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002755}
2756
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002757static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002758{
2759 struct be_queue_info *q;
2760 struct be_rx_obj *rxo;
2761 int i;
2762
2763 for_all_rx_queues(adapter, rxo, i) {
2764 q = &rxo->q;
2765 if (q->created) {
2766 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002767 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002768 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002769 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002770 }
2771}
2772
/* ndo_stop handler: quiesce and tear down the data path.
 * Order matters: NAPI off -> MCC async off -> TX drained -> RX queues
 * destroyed -> unicast MACs removed -> EQs synced and cleaned -> IRQs freed.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Slot 0 is the primary MAC; delete only the added unicast MACs */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no handler is still running before cleaning */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2822
/* Allocate and create all RX queues, program the RSS indirection table and
 * hash key (when more than one RXQ exists), and post the initial receive
 * buffers. Returns 0 on success or the first command/allocation error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * across all RSS_INDIR_TABLE_LEN slots.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Cache the key only after the FW accepted it */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2888
/* ndo_open handler: bring up the data path.
 * Creates RX queues, registers IRQs, arms all CQs/EQs, enables NAPI and
 * async MCC processing, reports the link state, and starts the TX queues.
 * Any failure tears everything back down via be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn already-open VxLAN UDP ports for offload (Skyhawk only) */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2938
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002939static int be_setup_wol(struct be_adapter *adapter, bool enable)
2940{
2941 struct be_dma_mem cmd;
2942 int status = 0;
2943 u8 mac[ETH_ALEN];
2944
2945 memset(mac, 0, ETH_ALEN);
2946
2947 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002948 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2949 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302950 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302951 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002952
2953 if (enable) {
2954 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302955 PCICFG_PM_CONTROL_OFFSET,
2956 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002957 if (status) {
2958 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002959 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002960 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2961 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002962 return status;
2963 }
2964 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302965 adapter->netdev->dev_addr,
2966 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002967 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2968 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2969 } else {
2970 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2971 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2972 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2973 }
2974
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002975 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002976 return status;
2977}
2978
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Note: on a per-VF failure this logs an error and continues; only the
 * status of the last VF processed is returned.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address in sequence */
		mac[5] += 1;
	}
	return status;
}
3014
Sathya Perla4c876612013-02-03 20:30:11 +00003015static int be_vfs_mac_query(struct be_adapter *adapter)
3016{
3017 int status, vf;
3018 u8 mac[ETH_ALEN];
3019 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003020
3021 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303022 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3023 mac, vf_cfg->if_handle,
3024 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003025 if (status)
3026 return status;
3027 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3028 }
3029 return 0;
3030}
3031
/* Tear down SR-IOV state.
 * If any VF is still assigned to a VM, SR-IOV is left enabled (only the
 * per-VF config memory is released). Otherwise SR-IOV is disabled and each
 * VF's MAC and interface are destroyed via FW commands.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses pmac entries; newer chips clear the MAC directly */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3060
/* Destroy all adapter queues. Order: MCC first, then RX CQs, TX queues and
 * finally the event queues they hang off.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3068
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303069static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003070{
Sathya Perla191eb752012-02-23 18:50:13 +00003071 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3072 cancel_delayed_work_sync(&adapter->work);
3073 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3074 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303075}
3076
Somnath Koturb05004a2013-12-05 12:08:16 +05303077static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303078{
3079 int i;
3080
Somnath Koturb05004a2013-12-05 12:08:16 +05303081 if (adapter->pmac_id) {
3082 for (i = 0; i < (adapter->uc_macs + 1); i++)
3083 be_cmd_pmac_del(adapter, adapter->if_handle,
3084 adapter->pmac_id[i], 0);
3085 adapter->uc_macs = 0;
3086
3087 kfree(adapter->pmac_id);
3088 adapter->pmac_id = NULL;
3089 }
3090}
3091
#ifdef CONFIG_BE2NET_VXLAN
/* Turn off VxLAN offloads: revert the interface from tunnel mode and clear
 * the FW's VxLAN UDP port, then reset the driver's offload state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303106
/* Full teardown of the adapter's software/FW setup (reverse of be_setup):
 * stop the worker, clear VFs, redistribute SR-IOV resources, drop VxLAN
 * offloads, delete MACs, destroy the interface and all queues, and disable
 * MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3135
/* Create a FW interface for every VF.
 * Starts from default capability flags (untagged/broadcast/multicast); for
 * non-BE3 chips the per-VF FW profile, when available, overrides them.
 * Returns 0 or the first be_cmd_if_create() error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			/* A profile-query failure keeps the previous flags */
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3167
Sathya Perla39f1d942012-05-08 19:41:24 +00003168static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003169{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003170 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003171 int vf;
3172
Sathya Perla39f1d942012-05-08 19:41:24 +00003173 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3174 GFP_KERNEL);
3175 if (!adapter->vf_cfg)
3176 return -ENOMEM;
3177
Sathya Perla11ac75e2011-12-13 00:58:50 +00003178 for_all_vfs(adapter, vf_cfg, vf) {
3179 vf_cfg->if_handle = -1;
3180 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003181 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003182 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003183}
3184
/* Bring up SR-IOV virtual functions.
 * If VFs were already enabled before this driver load (old_vfs != 0),
 * their i/f handles and MACs are queried from FW instead of re-created.
 * Otherwise i/f objects and MAC addresses are freshly programmed and
 * pci_enable_sriov() is invoked at the end.
 * On any failure all VF state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist (e.g. after a PF reload): re-discover
		 * their FW objects rather than creating new ones.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Pre-existing VFs are assumed already enabled by FW */
		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3259
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303260/* Converting function_mode bits on BE3 to SH mc_type enums */
3261
3262static u8 be_convert_mc_type(u32 function_mode)
3263{
Suresh Reddy66064db2014-06-23 16:41:29 +05303264 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303265 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303266 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303267 return FLEX10;
3268 else if (function_mode & VNIC_MODE)
3269 return vNIC2;
3270 else if (function_mode & UMC_ENABLED)
3271 return UMC;
3272 else
3273 return MC_NONE;
3274}
3275
/* On BE2/BE3 FW does not suggest the supported limits */
/* Fill in *res with driver-derived resource limits for BE2/BE3 chips,
 * based on chip type, multi-channel mode, RSS capability and whether
 * SR-IOV VFs are requested.  Caller passes in a zero-initialized *res.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs get the full unicast-MAC quota; VFs a smaller one */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for a non-SRIOV, RSS-capable PF; otherwise
	 * max_rss_qs stays at the caller's zero-initialized value.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3333
Sathya Perla30128032011-11-10 19:17:57 +00003334static void be_setup_init(struct be_adapter *adapter)
3335{
3336 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003337 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003338 adapter->if_handle = -1;
3339 adapter->be3_native = false;
3340 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003341 if (be_physfn(adapter))
3342 adapter->cmd_privileges = MAX_PRIVILEGES;
3343 else
3344 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003345}
3346
/* Query the SR-IOV PF-pool resources from FW, stash them in
 * adapter->pool_res and reconcile the num_vfs module parameter with
 * what the device supports / what is already enabled.
 * Returns 0 (the profile-config query failure is tolerated).
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI SR-IOV capability's TotalVFs */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled (e.g. prior driver load): the
		 * existing count wins over the module parameter.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3391
/* Populate adapter->res with this function's resource limits:
 * derived locally for BE2/BE3 (BEx), queried from FW for Lancer/SH.
 * Returns 0 or the FW-command error for non-BEx chips.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3428
/* Query the SR-IOV configuration and, when VFs are not yet enabled,
 * ask FW to redistribute the PF-pool resources across the requested
 * number of VFs.  Failures are logged but not propagated (void).
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3457
/* Gather the FW/HW configuration the driver needs before queue setup:
 * FW config, active profile (PF only), SR-IOV layout (non-BE2 PF only)
 * and the per-function resource limits.  Also allocates the pmac_id
 * table and clamps the configured queue count to HW limits.
 * Returns 0, a FW-command error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		/* Profile query failure is non-fatal; just skip the log */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* BE2 has no SR-IOV resource-redistribution support */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3491
Sathya Perla95046b92013-07-23 15:25:02 +05303492static int be_mac_setup(struct be_adapter *adapter)
3493{
3494 u8 mac[ETH_ALEN];
3495 int status;
3496
3497 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3498 status = be_cmd_get_perm_mac(adapter, mac);
3499 if (status)
3500 return status;
3501
3502 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3503 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3504 } else {
3505 /* Maybe the HW was reset; dev_addr must be re-programmed */
3506 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3507 }
3508
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003509 /* For BE3-R VFs, the PF programs the initial MAC address */
3510 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3511 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3512 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303513 return 0;
3514}
3515
/* Kick off the periodic (1s) worker and record that it is scheduled,
 * so teardown paths know there is a delayed work item to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3521
/* Create all HW queues (EQs, TXQs, RX CQs, MCC queues) in order and
 * publish the real RX/TX queue counts to the net stack.  On any
 * failure a single error is logged and the first error is returned;
 * cleanup of partially-created queues is the caller's responsibility.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3556
/* Tear down and re-create all queues (e.g. after a channel/queue-count
 * change): close the netdev if running, stop the worker, drop MSI-X
 * (unless vectors are shared with RoCE), rebuild queues, restart the
 * worker and re-open the netdev.  Returns 0 or the first error.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3592
/* Main adapter bring-up path: initialize soft state, query FW config
 * and resources, enable MSI-X, create the interface object and queues,
 * program the MAC, apply VLAN/RX-mode/flow-control settings, set up
 * VFs if requested, and start the periodic worker.  On failure the
 * whole setup is unwound via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags this i/f actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only re-program flow control if it differs from FW's view */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3674
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll handler (netconsole etc.): notify/re-arm every event queue
 * and schedule its NAPI context so pending completions are processed
 * without relying on interrupt delivery.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3690
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303691static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003692
Sathya Perla306f1342011-08-02 19:57:45 +00003693static bool phy_flashing_required(struct be_adapter *adapter)
3694{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003695 return (adapter->phy.phy_type == TN_8022 &&
3696 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003697}
3698
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003699static bool is_comp_in_ufi(struct be_adapter *adapter,
3700 struct flash_section_info *fsec, int type)
3701{
3702 int i = 0, img_type = 0;
3703 struct flash_section_info_g2 *fsec_g2 = NULL;
3704
Sathya Perlaca34fe32012-11-06 17:48:56 +00003705 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003706 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3707
3708 for (i = 0; i < MAX_FLASH_COMP; i++) {
3709 if (fsec_g2)
3710 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3711 else
3712 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3713
3714 if (img_type == type)
3715 return true;
3716 }
3717 return false;
3718
3719}
3720
Jingoo Han4188e7d2013-08-05 18:02:02 +09003721static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303722 int header_size,
3723 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003724{
3725 struct flash_section_info *fsec = NULL;
3726 const u8 *p = fw->data;
3727
3728 p += header_size;
3729 while (p < (fw->data + fw->size)) {
3730 fsec = (struct flash_section_info *)p;
3731 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3732 return fsec;
3733 p += 32;
3734 }
3735 return NULL;
3736}
3737
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303738static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3739 u32 img_offset, u32 img_size, int hdr_size,
3740 u16 img_optype, bool *crc_match)
3741{
3742 u32 crc_offset;
3743 int status;
3744 u8 crc[4];
3745
3746 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3747 if (status)
3748 return status;
3749
3750 crc_offset = hdr_size + img_offset + img_size - 4;
3751
3752 /* Skip flashing, if crc of flashed region matches */
3753 if (!memcmp(crc, p + crc_offset, 4))
3754 *crc_match = true;
3755 else
3756 *crc_match = false;
3757
3758 return status;
3759}
3760
/* Write one firmware image section to flash in 32KB chunks through the
 * DMA command buffer.  Intermediate chunks use the SAVE op; the final
 * chunk uses the FLASH op that commits the write.  A PHY-FW flash that
 * FW rejects as ILLEGAL_REQUEST is tolerated (treated as success).
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		/* PHY-FW rejection is non-fatal; any other error aborts */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3798
/* For BE2, BE3 and BE3-R */
/* Flash a firmware image on BE2/BE3/BE3-R: walk the chip-generation
 * specific component table, skip components that are absent from the
 * UFI, not applicable (NCSI on old FW, PHY FW when not required) or
 * whose redboot CRC already matches, and flash the rest via be_flash().
 * Returns 0 on success, -1 for a corrupt image, or a flash error.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Per-component flash layout for gen3 (BE3) chips:
	 * { flash offset, optype, max size, image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Same layout table for gen2 (BE2) chips */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs base FW >= 3.102.148.0 (string compare) */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip re-flashing redboot if its CRC already matches */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds-check the component against the file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3915
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303916static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3917{
3918 u32 img_type = le32_to_cpu(fsec_entry.type);
3919 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3920
3921 if (img_optype != 0xFFFF)
3922 return img_optype;
3923
3924 switch (img_type) {
3925 case IMAGE_FIRMWARE_iSCSI:
3926 img_optype = OPTYPE_ISCSI_ACTIVE;
3927 break;
3928 case IMAGE_BOOT_CODE:
3929 img_optype = OPTYPE_REDBOOT;
3930 break;
3931 case IMAGE_OPTION_ROM_ISCSI:
3932 img_optype = OPTYPE_BIOS;
3933 break;
3934 case IMAGE_OPTION_ROM_PXE:
3935 img_optype = OPTYPE_PXE_BIOS;
3936 break;
3937 case IMAGE_OPTION_ROM_FCoE:
3938 img_optype = OPTYPE_FCOE_BIOS;
3939 break;
3940 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3941 img_optype = OPTYPE_ISCSI_BACKUP;
3942 break;
3943 case IMAGE_NCSI:
3944 img_optype = OPTYPE_NCSI_FW;
3945 break;
3946 case IMAGE_FLASHISM_JUMPVECTOR:
3947 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3948 break;
3949 case IMAGE_FIRMWARE_PHY:
3950 img_optype = OPTYPE_SH_PHY_FW;
3951 break;
3952 case IMAGE_REDBOOT_DIR:
3953 img_optype = OPTYPE_REDBOOT_DIR;
3954 break;
3955 case IMAGE_REDBOOT_CONFIG:
3956 img_optype = OPTYPE_REDBOOT_CONFIG;
3957 break;
3958 case IMAGE_UFI_DIR:
3959 img_optype = OPTYPE_UFI_DIR;
3960 break;
3961 default:
3962 break;
3963 }
3964
3965 return img_optype;
3966}
3967
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003968static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303969 const struct firmware *fw,
3970 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003971{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003972 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303973 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003974 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303975 u32 img_offset, img_size, img_type;
3976 int status, i, filehdr_size;
3977 bool crc_match, old_fw_img;
3978 u16 img_optype;
3979 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003980
3981 filehdr_size = sizeof(struct flash_file_hdr_g3);
3982 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3983 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303984 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05303985 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003986 }
3987
3988 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3989 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3990 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303991 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3992 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3993 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003994
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303995 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003996 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303997 /* Don't bother verifying CRC if an old FW image is being
3998 * flashed
3999 */
4000 if (old_fw_img)
4001 goto flash;
4002
4003 status = be_check_flash_crc(adapter, fw->data, img_offset,
4004 img_size, filehdr_size +
4005 img_hdrs_size, img_optype,
4006 &crc_match);
4007 /* The current FW image on the card does not recognize the new
4008 * FLASH op_type. The FW download is partially complete.
4009 * Reboot the server now to enable FW image to recognize the
4010 * new FLASH op_type. To complete the remaining process,
4011 * download the same FW again after the reboot.
4012 */
Kalesh AP4c600052014-05-30 19:06:26 +05304013 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4014 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304015 dev_err(dev, "Flash incomplete. Reset the server\n");
4016 dev_err(dev, "Download FW image again after reset\n");
4017 return -EAGAIN;
4018 } else if (status) {
4019 dev_err(dev, "Could not get CRC for 0x%x region\n",
4020 img_optype);
4021 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004022 }
4023
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304024 if (crc_match)
4025 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004026
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304027flash:
4028 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004029 if (p + img_size > fw->data + fw->size)
4030 return -1;
4031
4032 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304033 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4034 * UFI_DIR region
4035 */
Kalesh AP4c600052014-05-30 19:06:26 +05304036 if (old_fw_img &&
4037 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4038 (img_optype == OPTYPE_UFI_DIR &&
4039 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304040 continue;
4041 } else if (status) {
4042 dev_err(dev, "Flashing section type 0x%x failed\n",
4043 img_type);
4044 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004045 }
4046 }
4047 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004048}
4049
/* Download a firmware image to a Lancer chip.
 *
 * The image is streamed to the FW in 32KB chunks via WRITE_OBJECT cmds
 * to the "/prg" object, then committed with a final zero-length write.
 * Depending on the change_status reported by the FW, the adapter is
 * either reset here to activate the new image, or the user is told a
 * system reboot is required.
 *
 * Returns 0 on success or a negative error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose length is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	/* Image bytes go right after the cmd header in the DMA buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually consumed, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* change_status tells us whether/how the new image gets activated */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
4146
Sathya Perlaca34fe32012-11-06 17:48:56 +00004147#define UFI_TYPE2 2
4148#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004149#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004150#define UFI_TYPE4 4
4151static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004152 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004153{
Kalesh APddf11692014-07-17 16:20:28 +05304154 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004155 goto be_get_ufi_exit;
4156
Sathya Perlaca34fe32012-11-06 17:48:56 +00004157 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4158 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004159 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4160 if (fhdr->asic_type_rev == 0x10)
4161 return UFI_TYPE3R;
4162 else
4163 return UFI_TYPE3;
4164 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004165 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004166
4167be_get_ufi_exit:
4168 dev_err(&adapter->pdev->dev,
4169 "UFI and Interface are not compatible for flashing\n");
4170 return -1;
4171}
4172
/* Flash a UFI firmware image on a BE2/BE3/Skyhawk adapter.
 *
 * Determines the UFI type from the file header, then walks the image
 * headers and dispatches each flashable image (imageid == 1) to the
 * chip-appropriate flashing routine. TYPE2 (BE2) images carry no image
 * headers and are flashed after the loop instead.
 *
 * Returns 0 on success or a negative error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every WRITE_FLASHROM cmd issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* Returns -1 if the image doesn't match this chip family */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* Only images with imageid == 1 are flashable */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* TYPE2 (BE2) images have no image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4241
4242int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4243{
4244 const struct firmware *fw;
4245 int status;
4246
4247 if (!netif_running(adapter->netdev)) {
4248 dev_err(&adapter->pdev->dev,
4249 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304250 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004251 }
4252
4253 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4254 if (status)
4255 goto fw_exit;
4256
4257 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4258
4259 if (lancer_chip(adapter))
4260 status = lancer_fw_download(adapter, fw);
4261 else
4262 status = be_fw_download(adapter, fw);
4263
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004264 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304265 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004266
Ajit Khaparde84517482009-09-04 03:12:16 +00004267fw_exit:
4268 release_firmware(fw);
4269 return status;
4270}
4271
Sathya Perla748b5392014-05-09 13:29:13 +05304272static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004273{
4274 struct be_adapter *adapter = netdev_priv(dev);
4275 struct nlattr *attr, *br_spec;
4276 int rem;
4277 int status = 0;
4278 u16 mode = 0;
4279
4280 if (!sriov_enabled(adapter))
4281 return -EOPNOTSUPP;
4282
4283 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4284
4285 nla_for_each_nested(attr, br_spec, rem) {
4286 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4287 continue;
4288
4289 mode = nla_get_u16(attr);
4290 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4291 return -EINVAL;
4292
4293 status = be_cmd_set_hsw_config(adapter, 0, 0,
4294 adapter->if_handle,
4295 mode == BRIDGE_MODE_VEPA ?
4296 PORT_FWD_TYPE_VEPA :
4297 PORT_FWD_TYPE_VEB);
4298 if (status)
4299 goto err;
4300
4301 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4302 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4303
4304 return status;
4305 }
4306err:
4307 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4308 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4309
4310 return status;
4311}
4312
4313static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304314 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004315{
4316 struct be_adapter *adapter = netdev_priv(dev);
4317 int status = 0;
4318 u8 hsw_mode;
4319
4320 if (!sriov_enabled(adapter))
4321 return 0;
4322
4323 /* BE and Lancer chips support VEB mode only */
4324 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4325 hsw_mode = PORT_FWD_TYPE_VEB;
4326 } else {
4327 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4328 adapter->if_handle, &hsw_mode);
4329 if (status)
4330 return 0;
4331 }
4332
4333 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4334 hsw_mode == PORT_FWD_TYPE_VEPA ?
4335 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4336}
4337
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304338#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304339static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4340 __be16 port)
4341{
4342 struct be_adapter *adapter = netdev_priv(netdev);
4343 struct device *dev = &adapter->pdev->dev;
4344 int status;
4345
4346 if (lancer_chip(adapter) || BEx_chip(adapter))
4347 return;
4348
4349 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4350 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4351 be16_to_cpu(port));
4352 dev_info(dev,
4353 "Only one UDP port supported for VxLAN offloads\n");
4354 return;
4355 }
4356
4357 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4358 OP_CONVERT_NORMAL_TO_TUNNEL);
4359 if (status) {
4360 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4361 goto err;
4362 }
4363
4364 status = be_cmd_set_vxlan_port(adapter, port);
4365 if (status) {
4366 dev_warn(dev, "Failed to add VxLAN port\n");
4367 goto err;
4368 }
4369 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4370 adapter->vxlan_port = port;
4371
4372 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4373 be16_to_cpu(port));
4374 return;
4375err:
4376 be_disable_vxlan_offloads(adapter);
4377 return;
4378}
4379
4380static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4381 __be16 port)
4382{
4383 struct be_adapter *adapter = netdev_priv(netdev);
4384
4385 if (lancer_chip(adapter) || BEx_chip(adapter))
4386 return;
4387
4388 if (adapter->vxlan_port != port)
4389 return;
4390
4391 be_disable_vxlan_offloads(adapter);
4392
4393 dev_info(&adapter->pdev->dev,
4394 "Disabled VxLAN offloads for UDP port %d\n",
4395 be16_to_cpu(port));
4396}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304397#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304398
/* Netdev callbacks for be2net interfaces. SR-IOV VF management hooks
 * (ndo_set_vf_*) are honored only when SR-IOV is enabled; VxLAN port
 * notifications are compiled in only with CONFIG_BE2NET_VXLAN.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4428
4429static void be_netdev_init(struct net_device *netdev)
4430{
4431 struct be_adapter *adapter = netdev_priv(netdev);
4432
Sathya Perlac9c47142014-03-27 10:46:19 +05304433 if (skyhawk_chip(adapter)) {
4434 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4435 NETIF_F_TSO | NETIF_F_TSO6 |
4436 NETIF_F_GSO_UDP_TUNNEL;
4437 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4438 }
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004439 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004440 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004441 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004442 if (be_multi_rxq(adapter))
4443 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004444
4445 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004446 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004447
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004448 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004449 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004450
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004451 netdev->priv_flags |= IFF_UNICAST_FLT;
4452
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004453 netdev->flags |= IFF_MULTICAST;
4454
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004455 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004456
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004457 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004458
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004459 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004460}
4461
4462static void be_unmap_pci_bars(struct be_adapter *adapter)
4463{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004464 if (adapter->csr)
4465 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004466 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004467 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004468}
4469
/* Return the PCI BAR number holding the doorbell region:
 * BAR 0 on Lancer and on VFs, BAR 4 on BE physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4477
4478static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004479{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004480 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004481 adapter->roce_db.size = 4096;
4482 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4483 db_bar(adapter));
4484 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4485 db_bar(adapter));
4486 }
Parav Pandit045508a2012-03-26 14:27:13 +00004487 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004488}
4489
4490static int be_map_pci_bars(struct be_adapter *adapter)
4491{
4492 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004493
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004494 if (BEx_chip(adapter) && be_physfn(adapter)) {
4495 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304496 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004497 return -ENOMEM;
4498 }
4499
Sathya Perlace66f782012-11-06 17:48:58 +00004500 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304501 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004502 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004503 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004504
4505 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004506 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004507
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004508pci_map_err:
4509 be_unmap_pci_bars(adapter);
4510 return -ENOMEM;
4511}
4512
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004513static void be_ctrl_cleanup(struct be_adapter *adapter)
4514{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004515 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004516
4517 be_unmap_pci_bars(adapter);
4518
4519 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004520 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4521 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004522
Sathya Perla5b8821b2011-08-02 19:57:44 +00004523 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004524 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004525 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4526 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004527}
4528
/* One-time control-path init: read the SLI interface register, map PCI
 * BARs, allocate the (16-byte aligned) mailbox and rx-filter DMA
 * buffers, and initialize the mailbox/MCC locks. Resources acquired so
 * far are released in reverse order on any failure.
 *
 * Returns 0 on success or a negative error code.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify the SLI family and whether this function is a VF */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into the mbox_mem_alloced buffer */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space for recovery after an EEH/error reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4587
4588static void be_stats_cleanup(struct be_adapter *adapter)
4589{
Sathya Perla3abcded2010-10-03 22:12:27 -07004590 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004591
4592 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004593 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4594 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004595}
4596
4597static int be_stats_init(struct be_adapter *adapter)
4598{
Sathya Perla3abcded2010-10-03 22:12:27 -07004599 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004600
Sathya Perlaca34fe32012-11-06 17:48:56 +00004601 if (lancer_chip(adapter))
4602 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4603 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004604 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004605 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004606 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004607 else
4608 /* ALL non-BE ASICs */
4609 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004610
Joe Perchesede23fa2013-08-26 22:45:23 -07004611 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4612 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304613 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304614 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004615 return 0;
4616}
4617
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe. The sequence matters — RoCE and interrupts first, pending
 * recovery work cancelled before the netdev goes away, FW told we are
 * done before control structures are freed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* No recovery work may run once teardown starts */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4648
Sathya Perla39f1d942012-05-08 19:41:24 +00004649static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004650{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304651 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004652
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004653 status = be_cmd_get_cntl_attributes(adapter);
4654 if (status)
4655 return status;
4656
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004657 /* Must be a power of 2 or else MODULO will BUG_ON */
4658 adapter->be_get_temp_freq = 64;
4659
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304660 if (BEx_chip(adapter)) {
4661 level = be_cmd_get_fw_log_level(adapter);
4662 adapter->msg_enable =
4663 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4664 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004665
Sathya Perla92bf14a2013-08-27 16:57:32 +05304666 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004667 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004668}
4669
/* Attempt to recover a Lancer chip after a HW error: wait for the chip to
 * report ready, tear the function down, clear the recorded error state and
 * bring it back up again.  The close/clear/setup/open ordering mirrors the
 * normal ifdown/ifup path and must not be reordered.
 * Returns 0 on success; -EAGAIN means FW resources are still being
 * provisioned and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* forget the error that triggered recovery before re-init */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4706
/* Periodic (1s) worker that polls for HW errors and, on Lancer chips,
 * detaches the netdev and runs the recovery sequence.  Reschedules itself
 * unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device
		 * while recovery reinitializes it
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4733
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues stats requests, polls die temperature on the
 * PF, replenishes starved RX queues and updates EQ delay (interrupt
 * moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* only fire a new stats cmd once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature query is throttled to every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4776
Sathya Perla257a3fe2013-06-14 15:54:51 +05304777/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004778static bool be_reset_required(struct be_adapter *adapter)
4779{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304780 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004781}
4782
Sathya Perlad3791422012-09-28 04:39:44 +00004783static char *mc_name(struct be_adapter *adapter)
4784{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304785 char *str = ""; /* default */
4786
4787 switch (adapter->mc_type) {
4788 case UMC:
4789 str = "UMC";
4790 break;
4791 case FLEX10:
4792 str = "FLEX10";
4793 break;
4794 case vNIC1:
4795 str = "vNIC-1";
4796 break;
4797 case nPAR:
4798 str = "nPAR";
4799 break;
4800 case UFP:
4801 str = "UFP";
4802 break;
4803 case vNIC2:
4804 str = "vNIC-2";
4805 break;
4806 default:
4807 str = "";
4808 }
4809
4810 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004811}
4812
/* Printable function role: "PF" for the physical function, "VF" otherwise */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4817
/* PCI probe callback: bring up one adapter function.
 * Sequence: enable PCI + BARs, allocate the netdev, configure DMA masks,
 * enable AER (PF only), init the control path, sync with FW, optionally FLR,
 * enable interrupts for ULPs, init stats/config, set up queues, and finally
 * register the netdev and kick off the recovery worker.
 * On failure, unwinds via the goto ladder in exact reverse order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required)
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4939
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce the
 * device (interrupts off, recovery worker cancelled, netdev closed and
 * cleared) and put the PCI function into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* stop recovery before tearing down, so it can't re-open the port */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4964
/* Legacy PM resume callback: restore PCI state, wait for FW readiness,
 * re-enable interrupts, re-run setup and re-open the interface if it was
 * running at suspend, then restart the recovery worker and disarm WoL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * every other caller in this file — confirm intentional.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5006
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* function-level reset guarantees no in-flight DMA at poweroff */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5026
/* EEH/AER error_detected callback: mark the error once, quiesce the netdev
 * and tear down the function.  Returns DISCONNECT for permanent failures,
 * otherwise NEED_RESET so the core proceeds to be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* guard against repeated error callbacks for the same event */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5065
/* EEH/AER slot_reset callback: re-enable the PCI function after the slot
 * reset, wait until FW reports ready, then clear AER status and the
 * driver's recorded error state.  RECOVERED lets the core continue to
 * be_eeh_resume(); DISCONNECT gives the device up.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5092
/* EEH/AER resume callback: final stage of error recovery.  Resets and
 * re-initializes the function, re-opens the interface if it was running,
 * and restarts the recovery worker.  On any failure the device is left
 * detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5135
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5141
/* PCI driver registration: lifecycle, legacy PM and error-handler hooks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5152
5153static int __init be_init_module(void)
5154{
Joe Perches8e95a202009-12-03 07:58:21 +00005155 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5156 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005157 printk(KERN_WARNING DRV_NAME
5158 " : Module param rx_frag_size must be 2048/4096/8192."
5159 " Using 2048\n");
5160 rx_frag_size = 2048;
5161 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005162
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005163 return pci_register_driver(&be_driver);
5164}
5165module_init(be_init_module);
5166
/* Module exit point: unregister the PCI driver (triggers be_remove for
 * every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);