blob: f059b62d29b533741338d1340e5b04e12edb93dd [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name of each HW block, indexed by
 * bit position in the "UE status low" register.  Strings are printed
 * verbatim when an unrecoverable error is logged; trailing spaces are
 * part of the original name table and are intentionally preserved.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* UE Status High CSR: human-readable name of each HW block, indexed by
 * bit position in the "UE status high" register.  The final "Unknown"
 * entry is the catch-all for bits beyond the named set.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* .ndo_set_mac_address handler.  Programs the requested MAC into the FW
 * and updates netdev->dev_addr only after the FW confirms the new MAC
 * is active (important on VFs, where the PF may own MAC provisioning).
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	/* Remember the currently programmed pmac so it can be deleted
	 * once the new one is added.
	 */
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0 (BE2) FW statistics into the driver's chip-agnostic
 * drv_stats structure.  The FW buffer is byte-swapped in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* Per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 FW splits address/vlan filtering; the driver exposes the sum */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per-port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) FW statistics into the driver's chip-agnostic
 * drv_stats structure.  The FW buffer is byte-swapped in place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* Per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 (Skyhawk-class) FW statistics into the driver's
 * chip-agnostic drv_stats structure, including RoCE counters when the
 * function supports RoCE.  The FW buffer is byte-swapped in place first.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	/* Per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters only exist in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer per-physical-port (pport) FW statistics into the
 * driver's chip-agnostic drv_stats structure.  Lancer uses a different
 * stats command/layout than the BEx/Skyhawk families.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): both input-fifo and rxpp-fifo drop counters are
	 * sourced from the single rx_fifo_overflow counter on Lancer.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer FW splits address/vlan filtering; driver exposes the sum */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
/* .ndo_get_stats64 handler: aggregate per-queue RX/TX packet/byte
 * counters and the FW-derived error counters into @stats.
 * Returns @stats, as the ndo contract requires.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* u64_stats seqcount loop: retry until pkts/bytes are
		 * read without a concurrent writer update in between.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* Same consistent-snapshot loop for the TX counters */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Fill the header WRB that precedes the data WRBs of a TX request.
 * Encodes per-packet offloads (LSO, checksum), VLAN tag insertion and
 * the total WRB count/byte length for the hardware.
 * @wrb_cnt: total number of WRBs (hdr + data + optional dummy)
 * @len: total payload bytes mapped into the data WRBs
 * @skip_hw_vlan: when true, tell HW to skip VLAN insertion via the
 *		  evt = 1, compl = 0 convention (see below)
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the separate LSOv6 bit */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* Tunnelled pkt: checksum applies to the inner
			 * headers; also request inner IP csum
			 */
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}
773
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530775 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000776{
777 dma_addr_t dma;
778
779 be_dws_le_to_cpu(wrb, sizeof(*wrb));
780
781 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000782 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000783 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000784 dma_unmap_single(dev, dma, wrb->frag_len,
785 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000786 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000787 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000788 }
789}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700790
/* Map @skb for DMA and post one WRB per fragment to @txq, preceded by a
 * header WRB (filled last, once the total byte count is known).
 * Returns the number of bytes mapped, or 0 on a DMA-mapping failure, in
 * which case all mappings made so far are undone and the queue head is
 * rewound to the first data WRB (the caller restores the hdr slot).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first queue slot for the header WRB */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; rewind point on error */

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length WRB to keep the total WRB count even (BE chips) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: unmap everything mapped so far.  Only the first mapped
	 * WRB can be a dma_map_single() mapping; map_single is cleared
	 * after the first iteration.
	 * NOTE(review): dma_map_errors is bumped once per unmapped WRB,
	 * and not at all when the very first mapping fails (copied == 0)
	 * -- looks like it over/under-counts; confirm intended semantics.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert the VLAN tag (and optionally the outer QnQ tag) into the packet
 * data itself, instead of relying on HW tag insertion.
 * Returns the (possibly reallocated) skb, or NULL if tag insertion failed
 * and the skb was consumed.
 * @skip_hw_vlan: if non-NULL, set to true when the F/W must be told to
 *		  skip HW VLAN insertion for this packet.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* Packet data is modified below; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* Untagged pkt in QnQ mode: fall back to the port VLAN id */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag now lives in the packet; clear the out-of-band tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply BEx/Lancer-specific TX workarounds to @skb: trim HW-padded short
 * IPv4 packets, and insert VLAN tags in software where HW tagging would
 * corrupt the packet or lock up the ASIC.
 * Returns the (possibly reallocated) skb, or NULL when the skb was
 * consumed (dropped or tag-insertion failure).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the packet back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply TX workarounds, map the skb into WRBs on
 * the selected TX queue and ring the doorbell.  Always returns
 * NETDEV_TX_OK; on any failure the skb is dropped and accounted in
 * tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rollback point / sent_skb_list index */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* skb already freed by the workaround path */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the pkt */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301076 struct device *dev = &adapter->pdev->dev;
1077
1078 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1079 dev_info(dev, "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001081 return -EINVAL;
1082 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301083
1084 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * Program the HW VLAN filter table from adapter->vids.
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Returns 0 on success or a f/w status code on failure.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More vids than HW filter slots: must use VLAN promisc mode */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in VLAN promisc mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1146
Patrick McHardy80d5c362013-04-19 02:04:28 +00001147static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148{
1149 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001150 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001152 /* Packets with VID 0 are always received by Lancer by default */
1153 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301154 return status;
1155
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301156 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301157 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001158
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301159 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301160 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001161
Somnath Kotura6b74e02014-01-21 15:50:55 +05301162 status = be_vid_config(adapter);
1163 if (status) {
1164 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301165 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301166 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301167
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001168 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169}
1170
Patrick McHardy80d5c362013-04-19 02:04:28 +00001171static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172{
1173 struct be_adapter *adapter = netdev_priv(netdev);
1174
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001175 /* Packets with VID 0 are always received by Lancer by default */
1176 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301177 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001178
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301179 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301180 adapter->vlans_added--;
1181
1182 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183}
1184
Somnath kotur7ad09452014-03-03 14:24:43 +05301185static void be_clear_promisc(struct be_adapter *adapter)
1186{
1187 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301188 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301189
1190 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1191}
1192
/* ndo_set_rx_mode handler: reprogram the RX filters (promisc, VLAN,
 * unicast and multicast MAC lists) to match the netdev flags and address
 * lists.  Falls back to promiscuous / all-multi modes when the requested
 * lists exceed what the adapter supports.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-apply the VLAN filter table that promisc bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* Re-sync the unicast MAC list only when it actually changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Remove all previously-programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many unicast MACs for the HW: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* mcast list programmed OK: drop mcast-promisc if it was on */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1259
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001260static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1261{
1262 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001264 int status;
1265
Sathya Perla11ac75e2011-12-13 00:58:50 +00001266 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001267 return -EPERM;
1268
Sathya Perla11ac75e2011-12-13 00:58:50 +00001269 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001270 return -EINVAL;
1271
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301272 /* Proceed further only if user provided MAC is different
1273 * from active MAC
1274 */
1275 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1276 return 0;
1277
Sathya Perla3175d8c2013-07-23 15:25:03 +05301278 if (BEx_chip(adapter)) {
1279 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1280 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001281
Sathya Perla11ac75e2011-12-13 00:58:50 +00001282 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1283 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301284 } else {
1285 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1286 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001287 }
1288
Kalesh APabccf232014-07-17 16:20:24 +05301289 if (status) {
1290 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1291 mac, vf, status);
1292 return be_cmd_status(status);
1293 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001294
Kalesh APabccf232014-07-17 16:20:24 +05301295 ether_addr_copy(vf_cfg->mac_addr, mac);
1296
1297 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001298}
1299
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001300static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301301 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001302{
1303 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001304 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001305
Sathya Perla11ac75e2011-12-13 00:58:50 +00001306 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001307 return -EPERM;
1308
Sathya Perla11ac75e2011-12-13 00:58:50 +00001309 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001310 return -EINVAL;
1311
1312 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001313 vi->max_tx_rate = vf_cfg->tx_rate;
1314 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001315 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1316 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001317 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301318 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319
1320 return 0;
1321}
1322
Sathya Perla748b5392014-05-09 13:29:13 +05301323static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001324{
1325 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001326 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001327 int status = 0;
1328
Sathya Perla11ac75e2011-12-13 00:58:50 +00001329 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001330 return -EPERM;
1331
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001332 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001333 return -EINVAL;
1334
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001335 if (vlan || qos) {
1336 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301337 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001338 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1339 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001340 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001341 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301342 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1343 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001344 }
1345
Kalesh APabccf232014-07-17 16:20:24 +05301346 if (status) {
1347 dev_err(&adapter->pdev->dev,
1348 "VLAN %d config on VF %d failed : %#x\n", vlan,
1349 vf, status);
1350 return be_cmd_status(status);
1351 }
1352
1353 vf_cfg->vlan_tag = vlan;
1354
1355 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001356}
1357
/* ndo_set_vf_rate() handler: apply a TX rate limit (Mbps) to VF @vf.
 * Only a maximum rate is supported (min_tx_rate must be 0);
 * max_tx_rate == 0 clears any existing limit.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* A minimum TX rate is not supported by this driver */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 removes the limit; skip all link-speed based validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	/* Requested rate must lie within [100, link_speed] Mbps; this also
	 * guarantees link_speed >= 100, so percent_rate below is non-zero.
	 */
	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	/* NOTE(review): on the max_tx_rate == 0 path link_speed is still 0
	 * here — presumably be_cmd_config_qos() treats 0/0 as "no limit";
	 * confirm against the firmware command definition.
	 */
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301419static int be_set_vf_link_state(struct net_device *netdev, int vf,
1420 int link_state)
1421{
1422 struct be_adapter *adapter = netdev_priv(netdev);
1423 int status;
1424
1425 if (!sriov_enabled(adapter))
1426 return -EPERM;
1427
1428 if (vf >= adapter->num_vfs)
1429 return -EINVAL;
1430
1431 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301432 if (status) {
1433 dev_err(&adapter->pdev->dev,
1434 "Link state change on VF %d failed: %#x\n", vf, status);
1435 return be_cmd_status(status);
1436 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301437
Kalesh APabccf232014-07-17 16:20:24 +05301438 adapter->vf_cfg[vf].plink_tracking = link_state;
1439
1440 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301441}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001442
Sathya Perla2632baf2013-10-01 16:00:00 +05301443static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1444 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445{
Sathya Perla2632baf2013-10-01 16:00:00 +05301446 aic->rx_pkts_prev = rx_pkts;
1447 aic->tx_reqs_prev = tx_pkts;
1448 aic->jiffies = now;
1449}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001450
/* Recompute the event-queue interrupt delay (EQD) for every EQ based on the
 * observed RX+TX packet rate since the previous run, and push any changed
 * values to the device in a single modify-eqd command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* Adaptive mode off: force the user-set static EQD */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read per-queue counters under the u64_stats seqlock */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined pkts/sec since last sample, scaled to an EQD and
		 * clamped to the per-queue min/max bounds.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only if the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all changed EQs into one firmware command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1517
Sathya Perla3abcded2010-10-03 22:12:27 -07001518static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301519 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001520{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001521 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001522
Sathya Perlaab1594e2011-07-25 19:10:15 +00001523 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001524 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001525 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001526 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001527 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001528 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001529 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001530 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001531 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532}
1533
Sathya Perla2e588f82011-03-11 02:49:26 +00001534static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001535{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001536 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301537 * Also ignore ipcksm for ipv6 pkts
1538 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001539 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301540 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001541}
1542
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301543static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001545 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001547 struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301548 u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549
Sathya Perla3abcded2010-10-03 22:12:27 -07001550 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551 BUG_ON(!rx_page_info->page);
1552
Sathya Perlae50287b2014-03-04 12:14:38 +05301553 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001554 dma_unmap_page(&adapter->pdev->dev,
1555 dma_unmap_addr(rx_page_info, bus),
1556 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05301557 rx_page_info->last_frag = false;
1558 } else {
1559 dma_sync_single_for_cpu(&adapter->pdev->dev,
1560 dma_unmap_addr(rx_page_info, bus),
1561 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001562 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301564 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565 atomic_dec(&rxq->used);
1566 return rx_page_info;
1567}
1568
1569/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570static void be_rx_compl_discard(struct be_rx_obj *rxo,
1571 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001574 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001576 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301577 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001578 put_page(page_info->page);
1579 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001580 }
1581}
1582
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: a tiny frame is memcpy'd into the linear area,
 * otherwise only the Ethernet header is copied and the payload is
 * attached as page fragments (fragments from the same physical page
 * are coalesced into one frag slot).
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area;
		 * the rest of this fragment becomes frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1657
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted fragments, stamp checksum /
 * RSS-hash / VLAN metadata and hand it to the stack. On skb allocation
 * failure the completion's fragments are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when RXCSUM is enabled and the
	 * completion's verdict is usable (see csum_passed())
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For encapsulated pkts, csum_level reflects the inner csum state */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1693
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted fragments directly to a napi-provided frag skb
 * (coalescing frags from the same physical page), stamp metadata and
 * feed it to napi_gro_frags(). Fragments are discarded if no skb is
 * available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when the HW verified the checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For encapsulated pkts, csum_level reflects the inner csum state */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1751
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001752static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1753 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301755 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1756 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1757 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1758 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1759 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1760 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1761 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1762 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1763 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1764 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1765 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001766 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301767 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1768 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001769 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301770 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301771 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301772 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001773}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001775static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1776 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001777{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301778 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1779 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1780 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1781 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1782 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1783 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1784 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1785 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1786 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1787 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1788 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001789 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301790 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1791 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001792 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301793 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1794 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001795}
1796
/* Return the next valid RX completion from the queue, parsed into the
 * per-queue rxcp scratch struct, or NULL if none is pending. The compl
 * entry's valid bit is cleared and the CQ tail advanced before returning.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure the reads of the compl body below are not reordered
	 * before the valid-bit read above
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum verdict is meaningless for an IP fragment */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* BE/Skyhawk report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless that vid was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1841
Eric Dumazet1829b082011-03-01 05:48:12 +00001842static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001843{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001845
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001847 gfp |= __GFP_COMP;
1848 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849}
1850
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until the RXQ head slot is already in use (non-NULL page)
	 * or MAX_RX_POST buffers have been posted.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big" page and DMA-map it once;
			 * the following frags reuse the same mapping.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current page: take a page ref
			 * since each frag is released independently on RX.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* The page's DMA mapping is recorded on its last
			 * frag; unmapping happens from that frag only.
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new RX buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1929
/* Pop the next valid TX completion from @tx_cq, or return NULL if none
 * is pending.  The entry is byte-swapped to CPU order and its valid
 * dword cleared before the CQ tail is advanced, so the slot reads as
 * empty on the next pass.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid-bit check */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset valid-bit so this slot is seen as empty next time */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1945
/* Unmap and free the skb whose wrbs run from the current TX queue tail
 * up to @last_index.  Returns the number of wrbs consumed (including the
 * header wrb) so the caller can credit them back to txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header is unmapped only once, together with the
		 * first fragment wrb
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* consume (not drop): the skb was transmitted successfully */
	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1977
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume EQ entries from the tail until a zero (not yet
	 * written by HW) entry is found.
	 */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read the entry only after seeing a non-zero evt word */
		rmb();
		eqe->evt = 0;	/* reset the slot for reuse */
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1997
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001998/* Leaves the EQ is disarmed state */
1999static void be_eq_clean(struct be_eq_obj *eqo)
2000{
2001 int num = events_get(eqo);
2002
2003 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2004}
2005
/* Drain rxo's completion queue, then release every RX buffer that is
 * still posted on its RX queue.  Used on teardown; leaves the CQ unarmed
 * and the RXQ empty with head/tail reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if a HW error is seen */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2054
/* Reap outstanding TX completions on every TX queue, then forcibly
 * unmap/free any posted skbs whose completions never arrived.  Used on
 * teardown so no DMA mappings or skbs are leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart 10ms window */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2112
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002113static void be_evt_queues_destroy(struct be_adapter *adapter)
2114{
2115 struct be_eq_obj *eqo;
2116 int i;
2117
2118 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002119 if (eqo->q.created) {
2120 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002121 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302122 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302123 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002124 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002125 be_queue_free(adapter, &eqo->q);
2126 }
2127}
2128
2129static int be_evt_queues_create(struct be_adapter *adapter)
2130{
2131 struct be_queue_info *eq;
2132 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302133 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002134 int i, rc;
2135
Sathya Perla92bf14a2013-08-27 16:57:32 +05302136 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2137 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002138
2139 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302140 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2141 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302142 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302143 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002144 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002145 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302146 aic->max_eqd = BE_MAX_EQD;
2147 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002148
2149 eq = &eqo->q;
2150 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302151 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002152 if (rc)
2153 return rc;
2154
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302155 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002156 if (rc)
2157 return rc;
2158 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002159 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002160}
2161
Sathya Perla5fb379e2009-06-18 00:02:59 +00002162static void be_mcc_queues_destroy(struct be_adapter *adapter)
2163{
2164 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002165
Sathya Perla8788fdc2009-07-27 22:52:03 +00002166 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002167 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002168 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002169 be_queue_free(adapter, q);
2170
Sathya Perla8788fdc2009-07-27 22:52:03 +00002171 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002172 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002173 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002174 be_queue_free(adapter, q);
2175}
2176
2177/* Must be called only after TX qs are created as MCC shares TX EQ */
2178static int be_mcc_queues_create(struct be_adapter *adapter)
2179{
2180 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002181
Sathya Perla8788fdc2009-07-27 22:52:03 +00002182 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002183 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302184 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002185 goto err;
2186
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002187 /* Use the default EQ for MCC completions */
2188 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002189 goto mcc_cq_free;
2190
Sathya Perla8788fdc2009-07-27 22:52:03 +00002191 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002192 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2193 goto mcc_cq_destroy;
2194
Sathya Perla8788fdc2009-07-27 22:52:03 +00002195 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002196 goto mcc_q_free;
2197
2198 return 0;
2199
2200mcc_q_free:
2201 be_queue_free(adapter, q);
2202mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002203 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002204mcc_cq_free:
2205 be_queue_free(adapter, cq);
2206err:
2207 return -1;
2208}
2209
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002210static void be_tx_queues_destroy(struct be_adapter *adapter)
2211{
2212 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002213 struct be_tx_obj *txo;
2214 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002215
Sathya Perla3c8def92011-06-12 20:01:58 +00002216 for_all_tx_queues(adapter, txo, i) {
2217 q = &txo->q;
2218 if (q->created)
2219 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2220 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221
Sathya Perla3c8def92011-06-12 20:01:58 +00002222 q = &txo->cq;
2223 if (q->created)
2224 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2225 be_queue_free(adapter, q);
2226 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227}
2228
Sathya Perla77071332013-08-27 16:57:34 +05302229static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002231 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002232 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302233 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234
Sathya Perla92bf14a2013-08-27 16:57:32 +05302235 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002236
Sathya Perla3c8def92011-06-12 20:01:58 +00002237 for_all_tx_queues(adapter, txo, i) {
2238 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002239 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2240 sizeof(struct be_eth_tx_compl));
2241 if (status)
2242 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243
John Stultz827da442013-10-07 15:51:58 -07002244 u64_stats_init(&txo->stats.sync);
2245 u64_stats_init(&txo->stats.sync_compl);
2246
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002247 /* If num_evt_qs is less than num_tx_qs, then more than
2248 * one txq share an eq
2249 */
2250 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2251 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2252 if (status)
2253 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002255 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2256 sizeof(struct be_eth_wrb));
2257 if (status)
2258 return status;
2259
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002260 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002261 if (status)
2262 return status;
2263 }
2264
Sathya Perlad3791422012-09-28 04:39:44 +00002265 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2266 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002267 return 0;
2268}
2269
2270static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271{
2272 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002273 struct be_rx_obj *rxo;
2274 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002275
Sathya Perla3abcded2010-10-03 22:12:27 -07002276 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002277 q = &rxo->cq;
2278 if (q->created)
2279 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2280 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002282}
2283
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002284static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002285{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002286 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002287 struct be_rx_obj *rxo;
2288 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002289
Sathya Perla92bf14a2013-08-27 16:57:32 +05302290 /* We can create as many RSS rings as there are EQs. */
2291 adapter->num_rx_qs = adapter->num_evt_qs;
2292
2293 /* We'll use RSS only if atleast 2 RSS rings are supported.
2294 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002295 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302296 if (adapter->num_rx_qs > 1)
2297 adapter->num_rx_qs++;
2298
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002300 for_all_rx_queues(adapter, rxo, i) {
2301 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002302 cq = &rxo->cq;
2303 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302304 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002305 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002306 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307
John Stultz827da442013-10-07 15:51:58 -07002308 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002309 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2310 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002311 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002312 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002313 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002314
Sathya Perlad3791422012-09-28 04:39:44 +00002315 dev_info(&adapter->pdev->dev,
2316 "created %d RSS queue(s) and 1 default RX queue\n",
2317 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002318 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002319}
2320
/* Legacy (INTx) interrupt handler: count pending EQ events, hand
 * processing to NAPI, and suppress the kernel's bad-irq detection for
 * isolated spurious interrupts (see comments below).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2352
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002353static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002354{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002355 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002356
Sathya Perla0b545a62012-11-23 00:27:18 +00002357 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2358 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359 return IRQ_HANDLED;
2360}
2361
Sathya Perla2e588f82011-03-11 02:49:26 +00002362static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002363{
Somnath Koture38b1702013-05-29 22:55:56 +00002364 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002365}
2366
/* Consume up to @budget RX completions from rxo's CQ and deliver the
 * packets to the stack (via GRO where applicable).  @polling tells
 * whether we are in NAPI or busy-poll context.  Returns the number of
 * completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the processed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2422
Kalesh AP512bb8a2014-09-02 09:56:49 +05302423static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2424{
2425 switch (status) {
2426 case BE_TX_COMP_HDR_PARSE_ERR:
2427 tx_stats(txo)->tx_hdr_parse_err++;
2428 break;
2429 case BE_TX_COMP_NDMA_ERR:
2430 tx_stats(txo)->tx_dma_err++;
2431 break;
2432 case BE_TX_COMP_ACL_ERR:
2433 tx_stats(txo)->tx_spoof_check_err++;
2434 break;
2435 }
2436}
2437
2438static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2439{
2440 switch (status) {
2441 case LANCER_TX_COMP_LSO_ERR:
2442 tx_stats(txo)->tx_tso_err++;
2443 break;
2444 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2445 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2446 tx_stats(txo)->tx_spoof_check_err++;
2447 break;
2448 case LANCER_TX_COMP_QINQ_ERR:
2449 tx_stats(txo)->tx_qinq_err++;
2450 break;
2451 case LANCER_TX_COMP_PARITY_ERR:
2452 tx_stats(txo)->tx_internal_parity_err++;
2453 break;
2454 case LANCER_TX_COMP_DMA_ERR:
2455 tx_stats(txo)->tx_dma_err++;
2456 break;
2457 }
2458}
2459
/* Reap completed TX wrbs on @txo (netdev subqueue index @idx), account
 * HW-reported TX errors, and re-wake the subqueue once enough wrbs have
 * been freed.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* Non-zero status means HW flagged an error on this compl */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002498
/* NAPI poll handler shared by all TX/RX queues hanging off one EQ.
 * TX completions are always processed; RX only when the per-EQ NAPI
 * lock is available (it may be held by busy-polling); MCC completions
 * on the EQ that owns the MCC queue.  The EQ is re-armed only when the
 * budget was not exhausted.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll holds the lock; claim full budget to re-poll */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2538
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll handler: try to reap a few RX completions without sleeping.
 * Returns LL_FLUSH_BUSY when the NAPI path currently owns the EQ,
 * otherwise the number of RX packets processed (0 if none were pending).
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int polled = 0;
	int i;

	/* NAPI context holds this EQ; tell the caller to come back later */
	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	/* Stop at the first RXQ that yields any completions */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		polled = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (polled)
			break;
	}

	be_unlock_busy_poll(eqo);
	return polled;
}
#endif
2560
/* Poll hardware error state. On Lancer chips the SLIPORT status register
 * is checked; on other chips the PCI UE (unrecoverable error) status
 * registers are read and filtered through their mask registers. Logs
 * failure details and turns the carrier off when a real error is found.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Nothing to do if a HW error has already been recorded */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Bits covered by the mask registers are not treated as
		 * errors; clear them before testing.
		 */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2636
Sathya Perla8d56ff12009-11-22 22:02:26 +00002637static void be_msix_disable(struct be_adapter *adapter)
2638{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002639 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002640 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002641 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302642 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002643 }
2644}
2645
/* Request MSI-x vectors from the PCI core. At least MIN_MSIX_VECTORS must
 * be granted; fewer than requested is accepted. When RoCE is supported,
 * half of the granted vectors are set aside for the RoCE driver.
 * Returns 0 on success. On failure returns 0 for PFs (probe continues
 * with INTx) but an error for VFs, which cannot fall back to INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	/* NIC keeps whatever was not reserved for RoCE */
	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2689
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002690static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302691 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002692{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302693 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002694}
2695
/* Request one IRQ per event queue using the MSI-x vectors assigned in
 * be_msix_enable(). On failure, unwinds the already-registered IRQs and
 * disables MSI-x so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* eqo->desc is the irq name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Free the IRQs registered so far (EQs 0..i-1) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2719
2720static int be_irq_register(struct be_adapter *adapter)
2721{
2722 struct net_device *netdev = adapter->netdev;
2723 int status;
2724
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002725 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002726 status = be_msix_register(adapter);
2727 if (status == 0)
2728 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002729 /* INTx is not supported for VF */
2730 if (!be_physfn(adapter))
2731 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002732 }
2733
Sathya Perlae49cc342012-11-27 19:50:02 +00002734 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002735 netdev->irq = adapter->pdev->irq;
2736 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002737 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002738 if (status) {
2739 dev_err(&adapter->pdev->dev,
2740 "INTx request IRQ failed - err %d\n", status);
2741 return status;
2742 }
2743done:
2744 adapter->isr_registered = true;
2745 return 0;
2746}
2747
2748static void be_irq_unregister(struct be_adapter *adapter)
2749{
2750 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002751 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002752 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753
2754 if (!adapter->isr_registered)
2755 return;
2756
2757 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002758 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002759 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002760 goto done;
2761 }
2762
2763 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002764 for_all_evt_queues(adapter, eqo, i)
2765 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002766
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002767done:
2768 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002769}
2770
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002771static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002772{
2773 struct be_queue_info *q;
2774 struct be_rx_obj *rxo;
2775 int i;
2776
2777 for_all_rx_queues(adapter, rxo, i) {
2778 q = &rxo->q;
2779 if (q->created) {
2780 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002782 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002783 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002784 }
2785}
2786
/* ndo_stop handler: quiesce the device — stop NAPI/busy-poll, async MCC
 * and TX, drain pending TX completions, destroy RX queues, drop uc-list
 * MACs, clean the EQs and release IRQs. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete only the uc-list MACs (pmac_id[1..uc_macs]); the entry at
	 * index 0 is deliberately left programmed.
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no IRQ handler is running before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2836
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002837static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002838{
2839 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002840 int rc, i, j;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302841 u8 rss_hkey[RSS_HASH_KEY_LEN];
2842 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00002843
2844 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002845 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2846 sizeof(struct be_eth_rx_d));
2847 if (rc)
2848 return rc;
2849 }
2850
2851 /* The FW would like the default RXQ to be created first */
2852 rxo = default_rxo(adapter);
2853 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2854 adapter->if_handle, false, &rxo->rss_id);
2855 if (rc)
2856 return rc;
2857
2858 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002859 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002860 rx_frag_size, adapter->if_handle,
2861 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002862 if (rc)
2863 return rc;
2864 }
2865
2866 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302867 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2868 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002869 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302870 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002871 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302872 rss->rsstable[j + i] = rxo->rss_id;
2873 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002874 }
2875 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05302876 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2877 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00002878
2879 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05302880 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2881 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302882 } else {
2883 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05302884 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302885 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002886
Venkata Duvvurue2557872014-04-21 15:38:00 +05302887 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05302888 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Venkata Duvvurue2557872014-04-21 15:38:00 +05302889 128, rss_hkey);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302890 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302891 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302892 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002893 }
2894
Venkata Duvvurue2557872014-04-21 15:38:00 +05302895 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2896
Sathya Perla482c9e72011-06-29 23:33:17 +00002897 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002898 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002899 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002900 return 0;
2901}
2902
/* ndo_open handler: create RX queues, register IRQs, arm all CQs and EQs,
 * enable NAPI/busy-poll and async MCC processing, report link state and
 * start the TX queues. On any failure be_close() undoes partial setup.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Link status query failure is not fatal for open */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Re-learn the VxLAN ports already configured in the stack */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2952
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002953static int be_setup_wol(struct be_adapter *adapter, bool enable)
2954{
2955 struct be_dma_mem cmd;
2956 int status = 0;
2957 u8 mac[ETH_ALEN];
2958
2959 memset(mac, 0, ETH_ALEN);
2960
2961 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002962 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2963 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302964 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302965 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002966
2967 if (enable) {
2968 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302969 PCICFG_PM_CONTROL_OFFSET,
2970 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002971 if (status) {
2972 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002973 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002974 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2975 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002976 return status;
2977 }
2978 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302979 adapter->netdev->dev_addr,
2980 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002981 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2982 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2983 } else {
2984 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2985 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2986 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2987 }
2988
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002989 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002990 return status;
2991}
2992
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002993/*
2994 * Generate a seed MAC address from the PF MAC Address using jhash.
2995 * MAC Address for VFs are assigned incrementally starting from the seed.
2996 * These addresses are programmed in the ASIC by the PF and the VF driver
2997 * queries for the MAC address during its probe.
2998 */
Sathya Perla4c876612013-02-03 20:30:11 +00002999static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003000{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003001 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003002 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003003 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003004 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003005
3006 be_vf_eth_addr_generate(adapter, mac);
3007
Sathya Perla11ac75e2011-12-13 00:58:50 +00003008 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303009 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003010 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003011 vf_cfg->if_handle,
3012 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303013 else
3014 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3015 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003016
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003017 if (status)
3018 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303019 "Mac address assignment failed for VF %d\n",
3020 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003021 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003022 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003023
3024 mac[5] += 1;
3025 }
3026 return status;
3027}
3028
Sathya Perla4c876612013-02-03 20:30:11 +00003029static int be_vfs_mac_query(struct be_adapter *adapter)
3030{
3031 int status, vf;
3032 u8 mac[ETH_ALEN];
3033 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003034
3035 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303036 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3037 mac, vf_cfg->if_handle,
3038 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003039 if (status)
3040 return status;
3041 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3042 }
3043 return 0;
3044}
3045
/* Tear down SR-IOV: disable VFs in the PCI core (unless they are still
 * assigned to VMs), remove each VF's MAC and interface in FW, and free
 * the per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		/* Can't disable SR-IOV while a VM holds a VF; only free
		 * our bookkeeping below.
		 */
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx used a PMAC entry; other chips set the MAC directly
		 * on the VF interface (see be_vf_eth_addr_config()).
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3074
/* Destroy all queue resources: MCC queues first, then RX CQs, TX queues
 * and finally the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3082
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303083static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003084{
Sathya Perla191eb752012-02-23 18:50:13 +00003085 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3086 cancel_delayed_work_sync(&adapter->work);
3087 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3088 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303089}
3090
Somnath Koturb05004a2013-12-05 12:08:16 +05303091static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303092{
3093 int i;
3094
Somnath Koturb05004a2013-12-05 12:08:16 +05303095 if (adapter->pmac_id) {
3096 for (i = 0; i < (adapter->uc_macs + 1); i++)
3097 be_cmd_pmac_del(adapter, adapter->if_handle,
3098 adapter->pmac_id[i], 0);
3099 adapter->uc_macs = 0;
3100
3101 kfree(adapter->pmac_id);
3102 adapter->pmac_id = NULL;
3103 }
3104}
3105
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: convert the tunnel interface back to normal
 * mode in FW and clear the programmed VxLAN UDP port.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303120
/* Full teardown: stop the worker, clear VFs, VxLAN offloads and MACs,
 * destroy the interface and all queues, and disable MSI-x.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	/* be_close() checks this flag to skip teardown of a cleared adapter */
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3149
/* Create a FW interface for each VF, enabling only the basic RX filter
 * flags (untagged/broadcast/multicast) out of the capability flags that
 * the FW profile (if any) reports for that VF.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* On non-BE3 chips, query the per-VF FW profile for the
		 * actual interface capabilities.
		 */
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3181
Sathya Perla39f1d942012-05-08 19:41:24 +00003182static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003183{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003184 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003185 int vf;
3186
Sathya Perla39f1d942012-05-08 19:41:24 +00003187 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3188 GFP_KERNEL);
3189 if (!adapter->vf_cfg)
3190 return -ENOMEM;
3191
Sathya Perla11ac75e2011-12-13 00:58:50 +00003192 for_all_vfs(adapter, vf_cfg, vf) {
3193 vf_cfg->if_handle = -1;
3194 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003195 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003196 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003197}
3198
/* Bring up SR-IOV VFs.
 * If VFs are already enabled in the HW (old_vfs != 0, e.g. left over
 * from a previous driver load) the existing FW interface handles and
 * MACs are re-queried; otherwise fresh interfaces and MACs are created,
 * PCI SR-IOV is enabled and per-VF QoS/link-state defaults are applied.
 * Each VF is also granted MAC/VLAN filtering privileges if the FW
 * allows it. On failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: re-query their state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3273
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303274/* Converting function_mode bits on BE3 to SH mc_type enums */
3275
3276static u8 be_convert_mc_type(u32 function_mode)
3277{
Suresh Reddy66064db2014-06-23 16:41:29 +05303278 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303279 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303280 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303281 return FLEX10;
3282 else if (function_mode & VNIC_MODE)
3283 return vNIC2;
3284 else if (function_mode & UMC_ENABLED)
3285 return UMC;
3286 else
3287 return MC_NONE;
3288}
3289
/* On BE2/BE3 FW does not suggest the supported limits, so derive the
 * per-function resource limits (filter counts and queue counts) in the
 * driver from chip type, multi-channel mode, SR-IOV state and function
 * capabilities.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS RX rings only for an RSS-capable, non-SRIOV physical function;
	 * one extra RX ring is always kept for the default (non-RSS) queue.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3357
Sathya Perla30128032011-11-10 19:17:57 +00003358static void be_setup_init(struct be_adapter *adapter)
3359{
3360 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003361 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003362 adapter->if_handle = -1;
3363 adapter->be3_native = false;
3364 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003365 if (be_physfn(adapter))
3366 adapter->cmd_privileges = MAX_PRIVILEGES;
3367 else
3368 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003369}
3370
/* Discover SR-IOV limits and decide how many VFs to enable.
 * Fills adapter->pool_res and adapter->num_vfs. VFs that are already
 * enabled (e.g. from a previous driver load) take precedence over the
 * num_vfs module parameter. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	/* Fall back to the PCI config-space TotalVFs on such FW */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3415
Sathya Perla92bf14a2013-08-27 16:57:32 +05303416static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003417{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303418 struct device *dev = &adapter->pdev->dev;
3419 struct be_resources res = {0};
3420 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003421
Sathya Perla92bf14a2013-08-27 16:57:32 +05303422 if (BEx_chip(adapter)) {
3423 BEx_get_resources(adapter, &res);
3424 adapter->res = res;
3425 }
3426
Sathya Perla92bf14a2013-08-27 16:57:32 +05303427 /* For Lancer, SH etc read per-function resource limits from FW.
3428 * GET_FUNC_CONFIG returns per function guaranteed limits.
3429 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3430 */
Sathya Perla4c876612013-02-03 20:30:11 +00003431 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303432 status = be_cmd_get_func_config(adapter, &res);
3433 if (status)
3434 return status;
3435
3436 /* If RoCE may be enabled stash away half the EQs for RoCE */
3437 if (be_roce_supported(adapter))
3438 res.max_evt_qs /= 2;
3439 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003440 }
3441
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303442 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3443 be_max_txqs(adapter), be_max_rxqs(adapter),
3444 be_max_rss(adapter), be_max_eqs(adapter),
3445 be_max_vfs(adapter));
3446 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3447 be_max_uc(adapter), be_max_mc(adapter),
3448 be_max_vlans(adapter));
3449
Sathya Perla92bf14a2013-08-27 16:57:32 +05303450 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003451}
3452
/* Query the SR-IOV configuration and, where supported, ask the FW to
 * redistribute PF-pool resources across the requested number of VFs.
 * Errors are only logged (not propagated); SR-IOV then simply remains
 * disabled or unoptimized.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3481
/* Query FW configuration and derive the driver's view of it: the
 * active profile (PF only), SR-IOV provisioning (non-BE2 PFs), the
 * per-function resource limits, the pmac_id table sized from those
 * limits, and a sanitized cfg_num_qs.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* BE2 has no SR-IOV resource-pool configuration */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3515
/* Program the interface MAC address.
 * If netdev has no MAC yet (first setup), adopt the permanent MAC read
 * from FW; otherwise re-program the MAC already on the netdev (the HW
 * may have been reset). BE3-R VFs skip pmac_add because the PF programs
 * their initial MAC.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3539
/* Start the periodic worker (1 sec cadence) and flag it as scheduled
 * via BE_FLAGS_WORKER_SCHEDULED.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3545
/* Create all HW queues (EQs, TX queues, RX CQs, MCC queues) in order
 * and publish the resulting ring counts to the network stack.
 * netif_set_real_num_*_queues() requires rtnl_lock(); be_setup() takes
 * it around this call.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3580
/* Tear down and re-create all queues (used when ring counts change),
 * re-programming MSI-X only when no vectors are shared with RoCE, and
 * restart the interface if it was running.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* Re-open; 'status' then carries be_open()'s result */
	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3616
/* Full adapter setup (probe/reset path): query FW config and resource
 * limits, enable MSI-X, create the FW interface and all queues, program
 * the MAC, apply VLAN/RX-mode/flow-control settings, bring up VFs if
 * requested, and start the periodic worker. On fatal errors everything
 * done so far is unwound via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags we want AND the interface supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Re-apply driver flow-control settings if FW disagrees */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3699
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler (netpoll/netconsole path): notify every
 * event queue and schedule its NAPI context so pending completions get
 * processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant trailing "return;" removed (checkpatch warning) */
}
#endif
3715
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303716static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003717
Sathya Perla306f1342011-08-02 19:57:45 +00003718static bool phy_flashing_required(struct be_adapter *adapter)
3719{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003720 return (adapter->phy.phy_type == TN_8022 &&
3721 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003722}
3723
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003724static bool is_comp_in_ufi(struct be_adapter *adapter,
3725 struct flash_section_info *fsec, int type)
3726{
3727 int i = 0, img_type = 0;
3728 struct flash_section_info_g2 *fsec_g2 = NULL;
3729
Sathya Perlaca34fe32012-11-06 17:48:56 +00003730 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003731 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3732
3733 for (i = 0; i < MAX_FLASH_COMP; i++) {
3734 if (fsec_g2)
3735 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3736 else
3737 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3738
3739 if (img_type == type)
3740 return true;
3741 }
3742 return false;
3743
3744}
3745
Jingoo Han4188e7d2013-08-05 18:02:02 +09003746static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303747 int header_size,
3748 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003749{
3750 struct flash_section_info *fsec = NULL;
3751 const u8 *p = fw->data;
3752
3753 p += header_size;
3754 while (p < (fw->data + fw->size)) {
3755 fsec = (struct flash_section_info *)p;
3756 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3757 return fsec;
3758 p += 32;
3759 }
3760 return NULL;
3761}
3762
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303763static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3764 u32 img_offset, u32 img_size, int hdr_size,
3765 u16 img_optype, bool *crc_match)
3766{
3767 u32 crc_offset;
3768 int status;
3769 u8 crc[4];
3770
3771 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3772 if (status)
3773 return status;
3774
3775 crc_offset = hdr_size + img_offset + img_size - 4;
3776
3777 /* Skip flashing, if crc of flashed region matches */
3778 if (!memcmp(crc, p + crc_offset, 4))
3779 *crc_match = true;
3780 else
3781 *crc_match = false;
3782
3783 return status;
3784}
3785
/* Push one image to the FW flash engine in 32KB chunks through the DMA
 * buffer in 'flash_cmd'. Intermediate chunks use a *_SAVE op; the final
 * chunk uses a *_FLASH op (PHY variants for OPTYPE_PHY_FW). A PHY-FW
 * write rejected as ILLEGAL_REQUEST is tolerated (treated as success).
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3823
/* For BE2, BE3 and BE3-R.
 * Walk the known flash layout (gen3 for BE3, gen2 for BE2), and for
 * every component present in the UFI's section directory flash it via
 * be_flash(). NCSI images are skipped on too-old FW, PHY images when no
 * flashable PHY is fitted, and redboot when its CRC already matches.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Per-component flash offsets/optypes/max-sizes for BE3 (gen3) */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Per-component flash offsets/optypes/max-sizes for BE2 (gen2) */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI images need FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip re-flashing redboot if the flashed CRC matches */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3940
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303941static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3942{
3943 u32 img_type = le32_to_cpu(fsec_entry.type);
3944 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3945
3946 if (img_optype != 0xFFFF)
3947 return img_optype;
3948
3949 switch (img_type) {
3950 case IMAGE_FIRMWARE_iSCSI:
3951 img_optype = OPTYPE_ISCSI_ACTIVE;
3952 break;
3953 case IMAGE_BOOT_CODE:
3954 img_optype = OPTYPE_REDBOOT;
3955 break;
3956 case IMAGE_OPTION_ROM_ISCSI:
3957 img_optype = OPTYPE_BIOS;
3958 break;
3959 case IMAGE_OPTION_ROM_PXE:
3960 img_optype = OPTYPE_PXE_BIOS;
3961 break;
3962 case IMAGE_OPTION_ROM_FCoE:
3963 img_optype = OPTYPE_FCOE_BIOS;
3964 break;
3965 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3966 img_optype = OPTYPE_ISCSI_BACKUP;
3967 break;
3968 case IMAGE_NCSI:
3969 img_optype = OPTYPE_NCSI_FW;
3970 break;
3971 case IMAGE_FLASHISM_JUMPVECTOR:
3972 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3973 break;
3974 case IMAGE_FIRMWARE_PHY:
3975 img_optype = OPTYPE_SH_PHY_FW;
3976 break;
3977 case IMAGE_REDBOOT_DIR:
3978 img_optype = OPTYPE_REDBOOT_DIR;
3979 break;
3980 case IMAGE_REDBOOT_CONFIG:
3981 img_optype = OPTYPE_REDBOOT_CONFIG;
3982 break;
3983 case IMAGE_UFI_DIR:
3984 img_optype = OPTYPE_UFI_DIR;
3985 break;
3986 default:
3987 break;
3988 }
3989
3990 return img_optype;
3991}
3992
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003993static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303994 const struct firmware *fw,
3995 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003996{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003997 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303998 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003999 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304000 u32 img_offset, img_size, img_type;
4001 int status, i, filehdr_size;
4002 bool crc_match, old_fw_img;
4003 u16 img_optype;
4004 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004005
4006 filehdr_size = sizeof(struct flash_file_hdr_g3);
4007 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4008 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304009 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304010 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004011 }
4012
4013 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4014 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4015 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304016 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4017 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4018 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004019
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304020 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004021 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304022 /* Don't bother verifying CRC if an old FW image is being
4023 * flashed
4024 */
4025 if (old_fw_img)
4026 goto flash;
4027
4028 status = be_check_flash_crc(adapter, fw->data, img_offset,
4029 img_size, filehdr_size +
4030 img_hdrs_size, img_optype,
4031 &crc_match);
4032 /* The current FW image on the card does not recognize the new
4033 * FLASH op_type. The FW download is partially complete.
4034 * Reboot the server now to enable FW image to recognize the
4035 * new FLASH op_type. To complete the remaining process,
4036 * download the same FW again after the reboot.
4037 */
Kalesh AP4c600052014-05-30 19:06:26 +05304038 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4039 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304040 dev_err(dev, "Flash incomplete. Reset the server\n");
4041 dev_err(dev, "Download FW image again after reset\n");
4042 return -EAGAIN;
4043 } else if (status) {
4044 dev_err(dev, "Could not get CRC for 0x%x region\n",
4045 img_optype);
4046 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004047 }
4048
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304049 if (crc_match)
4050 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004051
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304052flash:
4053 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004054 if (p + img_size > fw->data + fw->size)
4055 return -1;
4056
4057 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304058 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4059 * UFI_DIR region
4060 */
Kalesh AP4c600052014-05-30 19:06:26 +05304061 if (old_fw_img &&
4062 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4063 (img_optype == OPTYPE_UFI_DIR &&
4064 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304065 continue;
4066 } else if (status) {
4067 dev_err(dev, "Flashing section type 0x%x failed\n",
4068 img_type);
4069 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004070 }
4071 }
4072 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004073}
4074
/* Download a complete firmware image to a Lancer adapter.
 *
 * The image is streamed to the FW "/prg" object in 32KB chunks through a
 * single coherent DMA buffer, then committed with a zero-length write.
 * Depending on the change_status reported by the FW, the adapter is
 * either reset here to activate the new image, or the user is told a
 * reboot is required.
 *
 * Returns 0 on success or a negative errno (image not 4-byte aligned,
 * DMA allocation failure, or a FW command error mapped by
 * be_cmd_status()).
 */
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004075static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304076 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004077{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004078#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
 4079#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304080 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004081 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004082 const u8 *data_ptr = NULL;
 4083 u8 *dest_image_ptr = NULL;
 4084 size_t image_size = 0;
 4085 u32 chunk_size = 0;
 4086 u32 data_written = 0;
 4087 u32 offset = 0;
 4088 int status = 0;
 4089 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004090 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004091
/* FW requires the image length to be a multiple of 4 bytes */
 4092 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304093 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304094 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004095 }
 4096
/* One DMA buffer holds the write_object request header followed by the
 * current 32KB data chunk; it is reused for every chunk.
 */
 4097 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
 4098 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304099 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004100 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304101 if (!flash_cmd.va)
 4102 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004103
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004104 dest_image_ptr = flash_cmd.va +
 4105 sizeof(struct lancer_cmd_req_write_object);
 4106 image_size = fw->size;
 4107 data_ptr = fw->data;
 4108
/* Stream the image chunk by chunk; advance by the number of bytes the
 * FW reports as actually written.
 */
 4109 while (image_size) {
 4110 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
 4111
 4112 /* Copy the image chunk content. */
 4113 memcpy(dest_image_ptr, data_ptr, chunk_size);
 4114
 4115 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004116 chunk_size, offset,
 4117 LANCER_FW_DOWNLOAD_LOCATION,
 4118 &data_written, &change_status,
 4119 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004120 if (status)
 4121 break;
 4122
 4123 offset += data_written;
 4124 data_ptr += data_written;
 4125 image_size -= data_written;
 4126 }
 4127
 4128 if (!status) {
 4129 /* Commit the FW written */
 4130 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004131 0, offset,
 4132 LANCER_FW_DOWNLOAD_LOCATION,
 4133 &data_written, &change_status,
 4134 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004135 }
 4136
Kalesh APbb864e02014-09-02 09:56:51 +05304137 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004138 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304139 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304140 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004141 }
 4142
Kalesh APbb864e02014-09-02 09:56:51 +05304143 dev_info(dev, "Firmware flashed successfully\n");
 4144
/* Activate the new image: try an in-band FW reset if the FW asks for
 * one; otherwise (unless no reset is needed) tell the user to reboot.
 */
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004145 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304146 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004147 status = lancer_physdev_ctrl(adapter,
 4148 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004149 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304150 dev_err(dev, "Adapter busy, could not reset FW\n");
 4151 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004152 }
 4153 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304154 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004155 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304156
 4157 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004158}
4159
Sathya Perlaca34fe32012-11-06 17:48:56 +00004160#define UFI_TYPE2 2
4161#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004162#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004163#define UFI_TYPE4 4
4164static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004165 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004166{
Kalesh APddf11692014-07-17 16:20:28 +05304167 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004168 goto be_get_ufi_exit;
4169
Sathya Perlaca34fe32012-11-06 17:48:56 +00004170 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4171 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004172 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4173 if (fhdr->asic_type_rev == 0x10)
4174 return UFI_TYPE3R;
4175 else
4176 return UFI_TYPE3;
4177 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004178 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004179
4180be_get_ufi_exit:
4181 dev_err(&adapter->pdev->dev,
4182 "UFI and Interface are not compatible for flashing\n");
4183 return -1;
4184}
4185
/* Download a UFI firmware image to a BE2/BE3/BE3-R/Skyhawk adapter.
 *
 * Determines the UFI flavour from the file header, then dispatches each
 * image section to the flashing routine matching the chip generation.
 * A single DMA buffer is allocated up front and reused for all flash
 * commands.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation
 * failure, -EINVAL for an incompatible image, or the flash routine's
 * error).
 */
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004186static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
 4187{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004188 struct flash_file_hdr_g3 *fhdr3;
 4189 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004190 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004191 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004192 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004193
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004194 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004195 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
 4196 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00004197 if (!flash_cmd.va) {
 4198 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004199 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004200 }
 4201
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004202 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004203 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004204
/* ufi_type is -1 when the image does not match this adapter */
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004205 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004206
/* Flash only the image-header entry with imageid == 1; dispatch by
 * UFI flavour. UFI_TYPE2 images have no per-image headers and are
 * handled after this loop.
 */
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004207 num_imgs = le32_to_cpu(fhdr3->num_imgs);
 4208 for (i = 0; i < num_imgs; i++) {
 4209 img_hdr_ptr = (struct image_hdr *)(fw->data +
 4210 (sizeof(struct flash_file_hdr_g3) +
 4211 i * sizeof(struct image_hdr)));
 4212 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004213 switch (ufi_type) {
 4214 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004215 status = be_flash_skyhawk(adapter, fw,
Sathya Perla748b5392014-05-09 13:29:13 +05304216 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004217 break;
 4218 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00004219 status = be_flash_BEx(adapter, fw, &flash_cmd,
 4220 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004221 break;
 4222 case UFI_TYPE3:
 4223 /* Do not flash this ufi on BE3-R cards */
 4224 if (adapter->asic_rev < 0x10)
 4225 status = be_flash_BEx(adapter, fw,
 4226 &flash_cmd,
 4227 num_imgs);
 4228 else {
Kalesh AP56ace3a2014-07-17 16:20:20 +05304229 status = -EINVAL;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004230 dev_err(&adapter->pdev->dev,
 4231 "Can't load BE3 UFI on BE3R\n");
 4232 }
 4233 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004234 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004235 }
 4236
Sathya Perlaca34fe32012-11-06 17:48:56 +00004237 if (ufi_type == UFI_TYPE2)
 4238 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004239 else if (ufi_type == -1)
Kalesh AP56ace3a2014-07-17 16:20:20 +05304240 status = -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004241
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004242 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
 4243 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004244 if (status) {
 4245 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004246 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004247 }
 4248
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004249 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004250
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004251be_fw_exit:
 4252 return status;
 4253}
4254
4255int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4256{
4257 const struct firmware *fw;
4258 int status;
4259
4260 if (!netif_running(adapter->netdev)) {
4261 dev_err(&adapter->pdev->dev,
4262 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304263 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004264 }
4265
4266 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4267 if (status)
4268 goto fw_exit;
4269
4270 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4271
4272 if (lancer_chip(adapter))
4273 status = lancer_fw_download(adapter, fw);
4274 else
4275 status = be_fw_download(adapter, fw);
4276
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004277 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304278 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004279
Ajit Khaparde84517482009-09-04 03:12:16 +00004280fw_exit:
4281 release_firmware(fw);
4282 return status;
4283}
4284
Sathya Perla748b5392014-05-09 13:29:13 +05304285static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004286{
4287 struct be_adapter *adapter = netdev_priv(dev);
4288 struct nlattr *attr, *br_spec;
4289 int rem;
4290 int status = 0;
4291 u16 mode = 0;
4292
4293 if (!sriov_enabled(adapter))
4294 return -EOPNOTSUPP;
4295
4296 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4297
4298 nla_for_each_nested(attr, br_spec, rem) {
4299 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4300 continue;
4301
4302 mode = nla_get_u16(attr);
4303 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4304 return -EINVAL;
4305
4306 status = be_cmd_set_hsw_config(adapter, 0, 0,
4307 adapter->if_handle,
4308 mode == BRIDGE_MODE_VEPA ?
4309 PORT_FWD_TYPE_VEPA :
4310 PORT_FWD_TYPE_VEB);
4311 if (status)
4312 goto err;
4313
4314 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4315 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4316
4317 return status;
4318 }
4319err:
4320 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4321 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4322
4323 return status;
4324}
4325
4326static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304327 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004328{
4329 struct be_adapter *adapter = netdev_priv(dev);
4330 int status = 0;
4331 u8 hsw_mode;
4332
4333 if (!sriov_enabled(adapter))
4334 return 0;
4335
4336 /* BE and Lancer chips support VEB mode only */
4337 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4338 hsw_mode = PORT_FWD_TYPE_VEB;
4339 } else {
4340 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4341 adapter->if_handle, &hsw_mode);
4342 if (status)
4343 return 0;
4344 }
4345
4346 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4347 hsw_mode == PORT_FWD_TYPE_VEPA ?
4348 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4349}
4350
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304351#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304352static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4353 __be16 port)
4354{
4355 struct be_adapter *adapter = netdev_priv(netdev);
4356 struct device *dev = &adapter->pdev->dev;
4357 int status;
4358
4359 if (lancer_chip(adapter) || BEx_chip(adapter))
4360 return;
4361
4362 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4363 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4364 be16_to_cpu(port));
4365 dev_info(dev,
4366 "Only one UDP port supported for VxLAN offloads\n");
4367 return;
4368 }
4369
4370 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4371 OP_CONVERT_NORMAL_TO_TUNNEL);
4372 if (status) {
4373 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4374 goto err;
4375 }
4376
4377 status = be_cmd_set_vxlan_port(adapter, port);
4378 if (status) {
4379 dev_warn(dev, "Failed to add VxLAN port\n");
4380 goto err;
4381 }
4382 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4383 adapter->vxlan_port = port;
4384
4385 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4386 be16_to_cpu(port));
4387 return;
4388err:
4389 be_disable_vxlan_offloads(adapter);
4390 return;
4391}
4392
4393static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4394 __be16 port)
4395{
4396 struct be_adapter *adapter = netdev_priv(netdev);
4397
4398 if (lancer_chip(adapter) || BEx_chip(adapter))
4399 return;
4400
4401 if (adapter->vxlan_port != port)
4402 return;
4403
4404 be_disable_vxlan_offloads(adapter);
4405
4406 dev_info(&adapter->pdev->dev,
4407 "Disabled VxLAN offloads for UDP port %d\n",
4408 be16_to_cpu(port));
4409}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304410#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304411
/* net_device callbacks for be2net interfaces. VF management hooks are
 * wired unconditionally; netpoll, busy-poll and VxLAN hooks are gated
 * on their respective kernel config options.
 */
stephen hemmingere5686ad2012-01-05 19:10:25 +00004412static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004413 .ndo_open = be_open,
 4414 .ndo_stop = be_close,
 4415 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004416 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004417 .ndo_set_mac_address = be_mac_addr_set,
 4418 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004419 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004420 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004421 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
 4422 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004423 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004424 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004425 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004426 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304427 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004428#ifdef CONFIG_NET_POLL_CONTROLLER
 4429 .ndo_poll_controller = be_netpoll,
 4430#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004431 .ndo_bridge_setlink = be_ndo_bridge_setlink,
 4432 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304433#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304434 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304435#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304436#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304437 .ndo_add_vxlan_port = be_add_vxlan_port,
 4438 .ndo_del_vxlan_port = be_del_vxlan_port,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304439#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004440};
4441
/* Initialize the net_device: advertise offload feature flags (VxLAN
 * encapsulation offloads on Skyhawk only, RXHASH only with multiple RX
 * queues) and hook up the netdev and ethtool operation tables.
 */
 4442static void be_netdev_init(struct net_device *netdev)
 4443{
 4444 struct be_adapter *adapter = netdev_priv(netdev);
 4445
/* Encapsulated-traffic offloads are a Skyhawk-only capability */
Sathya Perlac9c47142014-03-27 10:46:19 +05304446 if (skyhawk_chip(adapter)) {
 4447 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 4448 NETIF_F_TSO | NETIF_F_TSO6 |
 4449 NETIF_F_GSO_UDP_TUNNEL;
 4450 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
 4451 }
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004452 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004453 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004454 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004455 if (be_multi_rxq(adapter))
 4456 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004457
/* Everything user-toggleable starts out enabled; VLAN RX/filter are
 * always-on and therefore not part of hw_features.
 */
 4458 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004459 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004460
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004461 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004462 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004463
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004464 netdev->priv_flags |= IFF_UNICAST_FLT;
 4465
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004466 netdev->flags |= IFF_MULTICAST;
 4467
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004468 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004469
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004470 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004471
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004472 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004473}
4474
4475static void be_unmap_pci_bars(struct be_adapter *adapter)
4476{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004477 if (adapter->csr)
4478 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004479 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004480 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004481}
4482
/* PCI BAR number holding the doorbell region: BAR 0 on Lancer chips
 * and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4490
4491static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004492{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004493 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004494 adapter->roce_db.size = 4096;
4495 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4496 db_bar(adapter));
4497 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4498 db_bar(adapter));
4499 }
Parav Pandit045508a2012-03-26 14:27:13 +00004500 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004501}
4502
4503static int be_map_pci_bars(struct be_adapter *adapter)
4504{
4505 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004506
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004507 if (BEx_chip(adapter) && be_physfn(adapter)) {
4508 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304509 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004510 return -ENOMEM;
4511 }
4512
Sathya Perlace66f782012-11-06 17:48:58 +00004513 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304514 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004515 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004516 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004517
4518 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004519 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004520
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004521pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304522 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004523 be_unmap_pci_bars(adapter);
4524 return -ENOMEM;
4525}
4526
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004527static void be_ctrl_cleanup(struct be_adapter *adapter)
4528{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004529 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004530
4531 be_unmap_pci_bars(adapter);
4532
4533 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004534 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4535 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004536
Sathya Perla5b8821b2011-08-02 19:57:44 +00004537 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004538 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004539 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4540 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004541}
4542
/* Initialize the adapter control path: read SLI identity from PCI
 * config space, map the PCI BARs, allocate the 16-byte-aligned mailbox
 * and the RX-filter DMA buffers, and set up the command locks.
 * On failure everything acquired so far is released (goto-cleanup).
 * Returns 0 or a negative errno.
 */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004543static int be_ctrl_init(struct be_adapter *adapter)
 4544{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004545 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
 4546 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004547 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004548 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004549 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004550
/* Identify the SLI family and PF/VF role from the SLI_INTF register */
Sathya Perlace66f782012-11-06 17:48:58 +00004551 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
 4552 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
 4553 SLI_INTF_FAMILY_SHIFT;
 4554 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
 4555
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004556 status = be_map_pci_bars(adapter);
 4557 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004558 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004559
/* Over-allocate by 16 bytes so the mailbox itself can be 16-byte
 * aligned via PTR_ALIGN below.
 */
 4560 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004561 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
 4562 mbox_mem_alloc->size,
 4563 &mbox_mem_alloc->dma,
 4564 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004565 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004566 status = -ENOMEM;
 4567 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004568 }
 4569 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
 4570 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
 4571 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 4572 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004573
Sathya Perla5b8821b2011-08-02 19:57:44 +00004574 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa82013-08-26 22:45:23 -07004575 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
 4576 rx_filter->size, &rx_filter->dma,
 4577 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304578 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004579 status = -ENOMEM;
 4580 goto free_mbox;
 4581 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004582
/* Locks serializing mailbox and MCC queue/completion access */
Ivan Vecera29849612010-12-14 05:43:19 +00004583 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004584 spin_lock_init(&adapter->mcc_lock);
 4585 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004586
Suresh Reddy5eeff632014-01-06 13:02:24 +05304587 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004588 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004589 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004590
 4591free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004592 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
 4593 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004594
 4595unmap_pci_bars:
 4596 be_unmap_pci_bars(adapter);
 4597
 4598done:
 4599 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004600}
4601
4602static void be_stats_cleanup(struct be_adapter *adapter)
4603{
Sathya Perla3abcded2010-10-03 22:12:27 -07004604 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004605
4606 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004607 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4608 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004609}
4610
4611static int be_stats_init(struct be_adapter *adapter)
4612{
Sathya Perla3abcded2010-10-03 22:12:27 -07004613 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004614
Sathya Perlaca34fe32012-11-06 17:48:56 +00004615 if (lancer_chip(adapter))
4616 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4617 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004618 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004619 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004620 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004621 else
4622 /* ALL non-BE ASICs */
4623 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004624
Joe Perchesede23fa82013-08-26 22:45:23 -07004625 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4626 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304627 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304628 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004629 return 0;
4630}
4631
/* PCI remove callback: tear down the adapter in the reverse order of probe.
 * The ordering below matters: RoCE and interrupts are quiesced before the
 * netdev is unregistered, and the mailbox/ctrl resources are freed last
 * because be_cmd_fw_clean() still needs them.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the error-recovery worker before dismantling state it uses */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4662
Sathya Perla39f1d942012-05-08 19:41:24 +00004663static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004664{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304665 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004666
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004667 status = be_cmd_get_cntl_attributes(adapter);
4668 if (status)
4669 return status;
4670
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004671 /* Must be a power of 2 or else MODULO will BUG_ON */
4672 adapter->be_get_temp_freq = 64;
4673
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304674 if (BEx_chip(adapter)) {
4675 level = be_cmd_get_fw_log_level(adapter);
4676 adapter->msg_enable =
4677 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4678 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004679
Sathya Perla92bf14a2013-08-27 16:57:32 +05304680 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004681 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004682}
4683
/* Recover a Lancer function after a firmware error: wait for the chip to
 * report ready, tear down and rebuild the function, and re-open the netdev
 * if it was running. Returns 0 on success; -EAGAIN means resources are not
 * yet provisioned and the caller should retry later.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* forget the previous error state before re-initializing */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4720
/* Periodic (1s) worker that polls for HW errors and, on Lancer only,
 * attempts automatic function recovery. The netdev is detached under
 * rtnl_lock before recovery so the stack stops using the device.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		/* re-attach only after a fully successful recovery */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4747
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes HW statistics, polls die temperature on the
 * PF, replenishes starved RX queues and updates EQ delay (interrupt
 * moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't issue a new stats request while one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* poll die temperature once every be_get_temp_freq ticks, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4790
Sathya Perla257a3fe2013-06-14 15:54:51 +05304791/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004792static bool be_reset_required(struct be_adapter *adapter)
4793{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304794 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004795}
4796
Sathya Perlad3791422012-09-28 04:39:44 +00004797static char *mc_name(struct be_adapter *adapter)
4798{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304799 char *str = ""; /* default */
4800
4801 switch (adapter->mc_type) {
4802 case UMC:
4803 str = "UMC";
4804 break;
4805 case FLEX10:
4806 str = "FLEX10";
4807 break;
4808 case vNIC1:
4809 str = "vNIC-1";
4810 break;
4811 case nPAR:
4812 str = "nPAR";
4813 break;
4814 case UFP:
4815 str = "UFP";
4816 break;
4817 case vNIC2:
4818 str = "vNIC-2";
4819 break;
4820 default:
4821 str = "";
4822 }
4823
4824 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004825}
4826
/* Printable label for the PCI function type (physical vs virtual). */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4831
/* PCI probe callback: bring up one adapter function. The sequence is
 * strictly ordered — PCI enable, BAR mapping/mailbox (be_ctrl_init), FW
 * readiness, optional function reset, FW init, stats buffer, configuration
 * query, queue/interrupt setup (be_setup) and finally netdev registration.
 * Each failure unwinds exactly the steps completed so far via the goto
 * ladder at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	/* best-effort: port name is only used in the banner below */
	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4955
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce
 * interrupts and the recovery worker, close and tear down the function,
 * then put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must be called under rtnl_lock */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4980
4981static int be_resume(struct pci_dev *pdev)
4982{
4983 int status = 0;
4984 struct be_adapter *adapter = pci_get_drvdata(pdev);
4985 struct net_device *netdev = adapter->netdev;
4986
4987 netif_device_detach(netdev);
4988
4989 status = pci_enable_device(pdev);
4990 if (status)
4991 return status;
4992
Yijing Wang1ca01512013-06-27 20:53:42 +08004993 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004994 pci_restore_state(pdev);
4995
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304996 status = be_fw_wait_ready(adapter);
4997 if (status)
4998 return status;
4999
Ajit Khaparded4360d62013-11-22 12:51:09 -06005000 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005001 /* tell fw we're ready to fire cmds */
5002 status = be_cmd_fw_init(adapter);
5003 if (status)
5004 return status;
5005
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00005006 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005007 if (netif_running(netdev)) {
5008 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005009 be_open(netdev);
5010 rtnl_unlock();
5011 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005012
5013 schedule_delayed_work(&adapter->func_recovery_work,
5014 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005015 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005016
Suresh Reddy76a9e082014-01-15 13:23:40 +05305017 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005018 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005019
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005020 return 0;
5021}
5022
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* function-level reset quiesces all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5043
/* AER/EEH error-detected callback: quiesce the function once per error
 * (guarded by adapter->eeh_error), then tell the PCI core whether a slot
 * reset should be attempted or the device is permanently lost.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		/* stop the recovery worker; EEH owns recovery from here */
		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5082
/* AER/EEH slot-reset callback: re-enable the PCI device after the slot
 * reset, wait for firmware readiness and clear the stored error state.
 * Returns RECOVERED so the core proceeds to be_eeh_resume(), or DISCONNECT
 * if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5109
/* AER/EEH resume callback: rebuild the function after a successful slot
 * reset — function reset, FW init, be_setup(), re-open the netdev if it
 * was running, and restart the recovery worker. On any failure the device
 * stays detached and only an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5152
/* PCI error-recovery (AER/EEH) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5158
/* PCI driver registration: ties the device ID table to the probe/remove,
 * legacy power-management, shutdown and error-recovery entry points.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5169
5170static int __init be_init_module(void)
5171{
Joe Perches8e95a202009-12-03 07:58:21 +00005172 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5173 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005174 printk(KERN_WARNING DRV_NAME
5175 " : Module param rx_frag_size must be 2048/4096/8192."
5176 " Using 2048\n");
5177 rx_frag_size = 2048;
5178 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005179
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005180 return pci_register_driver(&be_driver);
5181}
5182module_init(be_init_module);
5183
/* Module exit point: unregister the PCI driver, which triggers be_remove()
 * for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);