blob: a6cf6c75971e61c72c542fc9b2315fdaf34af4b6 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
/* Number of PCI virtual functions to enable at load time (read-only). */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the HW; default 2048 bytes. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
/* PCI IDs this driver claims; exported for module autoloading via
 * MODULE_DEVICE_TABLE() below. Table is zero-terminated.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: HW block name for each bit of the low UE
 * (unrecoverable error) status register, indexed by bit position.
 * Trailing spaces in some entries are intentional (kept as-is for
 * log formatting).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* UE Status High CSR: HW block name for each bit of the high UE
 * status register, indexed by bit position; last entry is a catch-all.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* ndo_set_mac_address handler: attempt to program the new MAC via the
 * FW PMAC_ADD command, delete the old PMAC on success, then query the
 * FW for the actually-active MAC before committing it to
 * netdev->dev_addr. Returns 0 on success, -EADDRNOTAVAIL for an
 * invalid address, -EPERM if the FW did not activate the new MAC, or
 * the FW command status on other failures.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC differs from the
	 * currently active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy BE2 (v0 layout) HW stats from the FW response into the driver's
 * generic drv_stats. The whole response is converted from LE in place
 * first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 keeps address- and vlan-filter drops separately; fold them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per-port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy BE3 (v1 layout) HW stats from the FW response into the driver's
 * generic drv_stats. The whole response is converted from LE in place
 * first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy v2-layout HW stats (chips newer than BE3; see be_parse_stats())
 * from the FW response into the driver's generic drv_stats. The whole
 * response is converted from LE in place first. v2 additionally
 * carries RoCE counters, copied only when RoCE is supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy Lancer pport stats from the FW response into the driver's
 * generic drv_stats. The whole response is converted from LE in place
 * first. Note: rx_fifo_overflow feeds both the input-fifo and rxpp
 * drop counters on this chip.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filter drops are kept separately; fold them */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue SW counters
 * (sampled consistently via the u64_stats fetch/retry loops) and the
 * FW-derived drv_stats error counters into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent
		 * writer update
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Build the TX header WRB descriptor for one skb.
 * Encodes LSO/checksum-offload flags from the skb's offload state, inlines
 * the (priority-adjusted) VLAN tag when present, and records the total WRB
 * count and payload length.  skip_hw_vlan drives the event/complete bit
 * combination that tells the f/w to skip HW VLAN insertion.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to the HW; lso6 only on non-Lancer */
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* For encapsulated pkts, checksum the inner L4 header */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}
773
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000774static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530775 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000776{
777 dma_addr_t dma;
778
779 be_dws_le_to_cpu(wrb, sizeof(*wrb));
780
781 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000782 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000783 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000784 dma_unmap_single(dev, dma, wrb->frag_len,
785 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000786 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000787 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000788 }
789}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700790
/* DMA-map the skb (linear part + page frags) and post one WRB per mapped
 * piece into txq, preceded by the header WRB and optionally followed by a
 * dummy (zero-length) WRB for even-count padding.
 * Returns the number of payload bytes mapped; on a DMA mapping failure all
 * completed mappings are undone, the queue head is rewound, and 0 is
 * returned.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	if (skb->len > skb->data_len) {
		/* Map the linear (header) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first WRB needs dma_unmap_single */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length pad WRB to make the WRB count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Walk the already-posted WRBs from map_head and unmap them;
	 * only the very first data WRB was a dma_map_single mapping.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself, for paths where HW VLAN insertion must be avoided.
 * Returns the (possibly re-allocated) skb, or NULL on failure — in the
 * failure case the skb has already been consumed (__vlan_put_tag frees
 * it when it cannot expand the headroom), so the caller must not free it
 * again.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* Don't modify a shared skb in place */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* In QnQ mode, fall back to the port VLAN when the skb has none */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag now lives in the packet data, not in the skb field */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
/* The ipv6-exthdr TX stall errata applies only to BE3 chips */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	if (!BE3_chip(adapter))
		return 0;

	return be_ipv6_exthdr_check(skb);
}
932
/* Apply the BEx/Lancer TX errata workarounds to the skb:
 * trim padding from short IPv4 pkts, inline the VLAN tag in SW where HW
 * tagging misbehaves, and drop pkts the HW cannot safely transmit.
 * Returns the (possibly re-allocated) skb, or NULL when the skb was
 * consumed — either dropped here or freed by a failed VLAN insertion.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the pkt back to the length the IP header claims */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;	/* skb already freed by callee */
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;	/* skb already freed by callee */
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply TX workarounds, build the WRBs, account
 * queue occupancy, and ring the doorbell.  Always returns NETDEV_TX_OK;
 * pkts that cannot be sent are dropped and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rewind point if WRB creation fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* skb was consumed by the workaround path */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the pkt */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301076 struct device *dev = &adapter->pdev->dev;
1077
1078 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1079 dev_info(dev, "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001081 return -EINVAL;
1082 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301083
1084 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
1090/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001091 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1092 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093 */
/* Program the current VLAN filter table (adapter->vids) into the HW.
 * Falls back to VLAN-promiscuous mode when more VLANs are configured than
 * the function supports or when the f/w reports insufficient resources;
 * conversely, re-enables HW filtering when a previous promisc fallback is
 * no longer needed.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in VLAN promisc mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1146
Patrick McHardy80d5c362013-04-19 02:04:28 +00001147static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148{
1149 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001150 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001152 /* Packets with VID 0 are always received by Lancer by default */
1153 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301154 return status;
1155
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301156 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301157 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001158
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301159 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301160 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001161
Somnath Kotura6b74e02014-01-21 15:50:55 +05301162 status = be_vid_config(adapter);
1163 if (status) {
1164 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301165 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301166 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301167
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001168 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169}
1170
Patrick McHardy80d5c362013-04-19 02:04:28 +00001171static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172{
1173 struct be_adapter *adapter = netdev_priv(netdev);
1174
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001175 /* Packets with VID 0 are always received by Lancer by default */
1176 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301177 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001178
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301179 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301180 adapter->vlans_added--;
1181
1182 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183}
1184
Somnath kotur7ad09452014-03-03 14:24:43 +05301185static void be_clear_promisc(struct be_adapter *adapter)
1186{
1187 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301188 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301189
1190 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1191}
1192
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast RX
 * filters in the f/w to match the netdev's current address lists and
 * flags, falling back to (multicast-)promiscuous mode when the configured
 * addresses exceed what the function supports or programming fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* Restore the VLAN filters skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* Re-sync the unicast MAC list when it has changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete every previously-programmed secondary MAC */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many UC addrs for HW filtering: go fully promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed; any earlier promisc fallback is over */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1259
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001260static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1261{
1262 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001264 int status;
1265
Sathya Perla11ac75e2011-12-13 00:58:50 +00001266 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001267 return -EPERM;
1268
Sathya Perla11ac75e2011-12-13 00:58:50 +00001269 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001270 return -EINVAL;
1271
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301272 /* Proceed further only if user provided MAC is different
1273 * from active MAC
1274 */
1275 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1276 return 0;
1277
Sathya Perla3175d8c2013-07-23 15:25:03 +05301278 if (BEx_chip(adapter)) {
1279 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1280 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001281
Sathya Perla11ac75e2011-12-13 00:58:50 +00001282 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1283 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301284 } else {
1285 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1286 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001287 }
1288
Kalesh APabccf232014-07-17 16:20:24 +05301289 if (status) {
1290 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1291 mac, vf, status);
1292 return be_cmd_status(status);
1293 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001294
Kalesh APabccf232014-07-17 16:20:24 +05301295 ether_addr_copy(vf_cfg->mac_addr, mac);
1296
1297 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001298}
1299
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001300static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301301 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001302{
1303 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001304 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001305
Sathya Perla11ac75e2011-12-13 00:58:50 +00001306 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001307 return -EPERM;
1308
Sathya Perla11ac75e2011-12-13 00:58:50 +00001309 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001310 return -EINVAL;
1311
1312 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001313 vi->max_tx_rate = vf_cfg->tx_rate;
1314 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001315 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1316 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001317 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301318 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319
1320 return 0;
1321}
1322
Sathya Perla748b5392014-05-09 13:29:13 +05301323static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001324{
1325 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001326 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001327 int status = 0;
1328
Sathya Perla11ac75e2011-12-13 00:58:50 +00001329 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001330 return -EPERM;
1331
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001332 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001333 return -EINVAL;
1334
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001335 if (vlan || qos) {
1336 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301337 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001338 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1339 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001340 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001341 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301342 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1343 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001344 }
1345
Kalesh APabccf232014-07-17 16:20:24 +05301346 if (status) {
1347 dev_err(&adapter->pdev->dev,
1348 "VLAN %d config on VF %d failed : %#x\n", vlan,
1349 vf, status);
1350 return be_cmd_status(status);
1351 }
1352
1353 vf_cfg->vlan_tag = vlan;
1354
1355 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001356}
1357
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001358static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1359 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001360{
1361 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301362 struct device *dev = &adapter->pdev->dev;
1363 int percent_rate, status = 0;
1364 u16 link_speed = 0;
1365 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001366
Sathya Perla11ac75e2011-12-13 00:58:50 +00001367 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001368 return -EPERM;
1369
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001370 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001371 return -EINVAL;
1372
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001373 if (min_tx_rate)
1374 return -EINVAL;
1375
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301376 if (!max_tx_rate)
1377 goto config_qos;
1378
1379 status = be_cmd_link_status_query(adapter, &link_speed,
1380 &link_status, 0);
1381 if (status)
1382 goto err;
1383
1384 if (!link_status) {
1385 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301386 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301387 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001388 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001389
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301390 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1391 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1392 link_speed);
1393 status = -EINVAL;
1394 goto err;
1395 }
1396
1397 /* On Skyhawk the QOS setting must be done only as a % value */
1398 percent_rate = link_speed / 100;
1399 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1400 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1401 percent_rate);
1402 status = -EINVAL;
1403 goto err;
1404 }
1405
1406config_qos:
1407 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001408 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301409 goto err;
1410
1411 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1412 return 0;
1413
1414err:
1415 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1416 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05301417 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001418}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301419static int be_set_vf_link_state(struct net_device *netdev, int vf,
1420 int link_state)
1421{
1422 struct be_adapter *adapter = netdev_priv(netdev);
1423 int status;
1424
1425 if (!sriov_enabled(adapter))
1426 return -EPERM;
1427
1428 if (vf >= adapter->num_vfs)
1429 return -EINVAL;
1430
1431 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301432 if (status) {
1433 dev_err(&adapter->pdev->dev,
1434 "Link state change on VF %d failed: %#x\n", vf, status);
1435 return be_cmd_status(status);
1436 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301437
Kalesh APabccf232014-07-17 16:20:24 +05301438 adapter->vf_cfg[vf].plink_tracking = link_state;
1439
1440 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301441}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001442
Sathya Perla2632baf2013-10-01 16:00:00 +05301443static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1444 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445{
Sathya Perla2632baf2013-10-01 16:00:00 +05301446 aic->rx_pkts_prev = rx_pkts;
1447 aic->tx_reqs_prev = tx_pkts;
1448 aic->jiffies = now;
1449}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001450
Sathya Perla2632baf2013-10-01 16:00:00 +05301451static void be_eqd_update(struct be_adapter *adapter)
1452{
1453 struct be_set_eqd set_eqd[MAX_EVT_QS];
1454 int eqd, i, num = 0, start;
1455 struct be_aic_obj *aic;
1456 struct be_eq_obj *eqo;
1457 struct be_rx_obj *rxo;
1458 struct be_tx_obj *txo;
1459 u64 rx_pkts, tx_pkts;
1460 ulong now;
1461 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001462
Sathya Perla2632baf2013-10-01 16:00:00 +05301463 for_all_evt_queues(adapter, eqo, i) {
1464 aic = &adapter->aic_obj[eqo->idx];
1465 if (!aic->enable) {
1466 if (aic->jiffies)
1467 aic->jiffies = 0;
1468 eqd = aic->et_eqd;
1469 goto modify_eqd;
1470 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471
Sathya Perla2632baf2013-10-01 16:00:00 +05301472 rxo = &adapter->rx_obj[eqo->idx];
1473 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001474 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301475 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001476 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001477
Sathya Perla2632baf2013-10-01 16:00:00 +05301478 txo = &adapter->tx_obj[eqo->idx];
1479 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001480 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301481 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001482 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001483
Sathya Perla4097f662009-03-24 16:40:13 -07001484
Sathya Perla2632baf2013-10-01 16:00:00 +05301485 /* Skip, if wrapped around or first calculation */
1486 now = jiffies;
1487 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1488 rx_pkts < aic->rx_pkts_prev ||
1489 tx_pkts < aic->tx_reqs_prev) {
1490 be_aic_update(aic, rx_pkts, tx_pkts, now);
1491 continue;
1492 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001493
Sathya Perla2632baf2013-10-01 16:00:00 +05301494 delta = jiffies_to_msecs(now - aic->jiffies);
1495 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1496 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1497 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001498
Sathya Perla2632baf2013-10-01 16:00:00 +05301499 if (eqd < 8)
1500 eqd = 0;
1501 eqd = min_t(u32, eqd, aic->max_eqd);
1502 eqd = max_t(u32, eqd, aic->min_eqd);
1503
1504 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001505modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301506 if (eqd != aic->prev_eqd) {
1507 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1508 set_eqd[num].eq_id = eqo->q.id;
1509 aic->prev_eqd = eqd;
1510 num++;
1511 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001512 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301513
1514 if (num)
1515 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001516}
1517
Sathya Perla3abcded2010-10-03 22:12:27 -07001518static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301519 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001520{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001521 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001522
Sathya Perlaab1594e2011-07-25 19:10:15 +00001523 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001524 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001525 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001526 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001527 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001528 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001529 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001530 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001531 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532}
1533
Sathya Perla2e588f82011-03-11 02:49:26 +00001534static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001535{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001536 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301537 * Also ignore ipcksm for ipv6 pkts
1538 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001539 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301540 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001541}
1542
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301543static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001545 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001547 struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301548 u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549
Sathya Perla3abcded2010-10-03 22:12:27 -07001550 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551 BUG_ON(!rx_page_info->page);
1552
Sathya Perlae50287b2014-03-04 12:14:38 +05301553 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001554 dma_unmap_page(&adapter->pdev->dev,
1555 dma_unmap_addr(rx_page_info, bus),
1556 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05301557 rx_page_info->last_frag = false;
1558 } else {
1559 dma_sync_single_for_cpu(&adapter->pdev->dev,
1560 dma_unmap_addr(rx_page_info, bus),
1561 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001562 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301564 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565 atomic_dec(&rxq->used);
1566 return rx_page_info;
1567}
1568
1569/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570static void be_rx_compl_discard(struct be_rx_obj *rxo,
1571 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001574 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001576 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301577 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001578 put_page(page_info->page);
1579 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001580 }
1581}
1582
1583/*
1584 * skb_fill_rx_data forms a complete skb for an ether frame
1585 * indicated by rxcp.
1586 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001587static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1588 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001590 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001591 u16 i, j;
1592 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001593 u8 *start;
1594
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301595 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001596 start = page_address(page_info->page) + page_info->page_offset;
1597 prefetch(start);
1598
1599 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001600 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001601
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001602 skb->len = curr_frag_len;
1603 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001604 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001605 /* Complete packet has now been moved to data */
1606 put_page(page_info->page);
1607 skb->data_len = 0;
1608 skb->tail += curr_frag_len;
1609 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001610 hdr_len = ETH_HLEN;
1611 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001613 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614 skb_shinfo(skb)->frags[0].page_offset =
1615 page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05301616 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1617 curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001618 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001619 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001620 skb->tail += hdr_len;
1621 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001622 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001623
Sathya Perla2e588f82011-03-11 02:49:26 +00001624 if (rxcp->pkt_size <= rx_frag_size) {
1625 BUG_ON(rxcp->num_rcvd != 1);
1626 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627 }
1628
1629 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001630 remaining = rxcp->pkt_size - curr_frag_len;
1631 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301632 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001633 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001634
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001635 /* Coalesce all frags from the same physical page in one slot */
1636 if (page_info->page_offset == 0) {
1637 /* Fresh page */
1638 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001639 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001640 skb_shinfo(skb)->frags[j].page_offset =
1641 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001642 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001643 skb_shinfo(skb)->nr_frags++;
1644 } else {
1645 put_page(page_info->page);
1646 }
1647
Eric Dumazet9e903e02011-10-18 21:00:24 +00001648 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001649 skb->len += curr_frag_len;
1650 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001651 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001652 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001653 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001655 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001656}
1657
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001658/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301659static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001660 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001661{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001662 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001663 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001664 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001665
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001666 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001667 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001668 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001669 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001670 return;
1671 }
1672
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001673 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001675 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001676 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001677 else
1678 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001680 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001681 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001682 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001683 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301684
Tom Herbertb6c0e892014-08-27 21:27:17 -07001685 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301686 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687
Jiri Pirko343e43c2011-08-25 02:50:51 +00001688 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001689 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001690
1691 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692}
1693
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001694/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001695static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1696 struct napi_struct *napi,
1697 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001699 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001700 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001701 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00001702 u16 remaining, curr_frag_len;
1703 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001704
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001705 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001706 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001707 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001708 return;
1709 }
1710
Sathya Perla2e588f82011-03-11 02:49:26 +00001711 remaining = rxcp->pkt_size;
1712 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301713 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714
1715 curr_frag_len = min(remaining, rx_frag_size);
1716
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001717 /* Coalesce all frags from the same physical page in one slot */
1718 if (i == 0 || page_info->page_offset == 0) {
1719 /* First frag or Fresh page */
1720 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001721 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001722 skb_shinfo(skb)->frags[j].page_offset =
1723 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001724 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001725 } else {
1726 put_page(page_info->page);
1727 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001728 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001729 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001730 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731 memset(page_info, 0, sizeof(*page_info));
1732 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001733 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001735 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001736 skb->len = rxcp->pkt_size;
1737 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001738 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001739 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001740 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001741 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301742
Tom Herbertb6c0e892014-08-27 21:27:17 -07001743 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301744 skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001745
Jiri Pirko343e43c2011-08-25 02:50:51 +00001746 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001747 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001748
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001749 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001750}
1751
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001752static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1753 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301755 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1756 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1757 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1758 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1759 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1760 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1761 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1762 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1763 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1764 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1765 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001766 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301767 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1768 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001769 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301770 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301771 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301772 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001773}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001775static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1776 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001777{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301778 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1779 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1780 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1781 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1782 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1783 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1784 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1785 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1786 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1787 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1788 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001789 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301790 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1791 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001792 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301793 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1794 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001795}
1796
/* Fetch and parse the next valid Rx completion from @rxo's CQ, or return
 * NULL when none is pending.  On success the completion is decoded into
 * rxo->rxcp, its valid bit is cleared and the CQ tail is advanced.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the compl */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum verdict is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the host has
		 * explicitly added that vid
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1841
Eric Dumazet1829b082011-03-01 05:48:12 +00001842static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001843{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001845
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001847 gfp |= __GFP_COMP;
1848 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849}
1850
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Stop early if the slot at rxq->head is still owned by a posted
	 * buffer (page_info->page != NULL), i.e. the ring is full.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page"; it is carved into
			 * rx_frag_size chunks below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			/* One DMA mapping covers the whole big page; each
			 * fragment descriptor gets an offset into it.
			 */
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag from the same page: take a reference
			 * so each frag's put_page() is balanced.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Last frag of this page: record the page-level DMA
			 * address so the whole mapping can be torn down when
			 * this frag is reclaimed.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 posted buffers */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1933
/* Fetch the next valid TX completion from @tx_cq, or NULL if none pending.
 * The returned entry is byte-swapped to CPU order and its valid bit is
 * cleared so it will not be processed again.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure the valid bit is read before the rest of the entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit; this slot is consumed */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1949
/* Reclaim one transmitted skb from the TX ring: unmap all of its WRBs from
 * txq->tail up to and including @last_index, free the skb, and return the
 * number of WRBs consumed (including the header WRB). The caller adjusts
 * txq->used by the returned count.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data WRB may carry the linear header
		 * mapping; unmap it once, then only frag mappings.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1981
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* evt == 0 means no more valid entries */
		if (eqe->evt == 0)
			break;

		/* Read the valid indication before clearing the entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2001
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002002/* Leaves the EQ is disarmed state */
2003static void be_eq_clean(struct be_eq_obj *eqo)
2004{
2005 int num = events_get(eqo);
2006
2007 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2008}
2009
/* Drain the RX completion queue during queue teardown and release any RX
 * buffers still posted to the hardware. Leaves the CQ unarmed and the RXQ
 * empty with head/tail reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms of silence or on HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2058
/* Drain TX completions from all TX queues during teardown, then forcibly
 * reclaim any posted skbs whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the last WRB index of this skb so the
			 * normal reclaim path can be reused.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2116
/* Destroy all event queues: drain and destroy the HW EQ and unregister its
 * NAPI context (only if the EQ was created), then free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Queue memory is freed even if HW creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2132
/* Create the event queues (one per vector, capped by the configured queue
 * count), register a NAPI context for each, and initialize adaptive
 * interrupt coalescing state. Returns 0 or a negative error; on failure
 * partially created EQs are left for the destroy path to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive coalescing starts enabled with the max EQ delay */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2165
Sathya Perla5fb379e2009-06-18 00:02:59 +00002166static void be_mcc_queues_destroy(struct be_adapter *adapter)
2167{
2168 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002169
Sathya Perla8788fdc2009-07-27 22:52:03 +00002170 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002171 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002172 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002173 be_queue_free(adapter, q);
2174
Sathya Perla8788fdc2009-07-27 22:52:03 +00002175 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002176 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002177 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002178 be_queue_free(adapter, q);
2179}
2180
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Create the CQ first: the MCC queue is bound to it below */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2213
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002214static void be_tx_queues_destroy(struct be_adapter *adapter)
2215{
2216 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002217 struct be_tx_obj *txo;
2218 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002219
Sathya Perla3c8def92011-06-12 20:01:58 +00002220 for_all_tx_queues(adapter, txo, i) {
2221 q = &txo->q;
2222 if (q->created)
2223 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2224 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225
Sathya Perla3c8def92011-06-12 20:01:58 +00002226 q = &txo->cq;
2227 if (q->created)
2228 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2229 be_queue_free(adapter, q);
2230 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002231}
2232
/* Create the TX queues and their completion queues. The number of TX queues
 * is bounded by both the number of event queues and the HW-supported max.
 * Returns 0 or a negative error; partially created queues are cleaned up by
 * the destroy path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2273
2274static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002275{
2276 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002277 struct be_rx_obj *rxo;
2278 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279
Sathya Perla3abcded2010-10-03 22:12:27 -07002280 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002281 q = &rxo->cq;
2282 if (q->created)
2283 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2284 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286}
2287
/* Create the RX completion queues. One RSS ring per EQ; if RSS is usable
 * (>= 2 rings) an extra default RXQ is added for non-IP traffic. Returns 0
 * or a negative error; partial creations are cleaned up by the destroy path.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* big_page_size is the per-mapping allocation unit used when
	 * posting rx_frag_size-sized buffers.
	 */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs may share EQs when there are fewer EQs than RXQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2324
/* INTx interrupt handler: count pending EQ events, hand processing to NAPI,
 * and track spurious interrupts so a flaky line is eventually reported as
 * IRQ_NONE instead of being masked forever.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events without re-arming the EQ */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2356
/* MSI-x interrupt handler: ack the interrupt without re-arming the EQ and
 * defer all event/completion processing to NAPI (be_poll).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2365
Sathya Perla2e588f82011-03-11 02:49:26 +00002366static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367{
Somnath Koture38b1702013-05-29 22:55:56 +00002368 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002369}
2370
/* NAPI RX poll loop: consume up to @budget RX completions from @rxo's CQ,
 * delivering each frame via GRO or the regular path, and replenish the RX
 * ring when it runs low. @polling distinguishes NAPI from busy-poll mode.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Track consumed frags so the refill below posts enough */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2430
Kalesh AP512bb8a2014-09-02 09:56:49 +05302431static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2432{
2433 switch (status) {
2434 case BE_TX_COMP_HDR_PARSE_ERR:
2435 tx_stats(txo)->tx_hdr_parse_err++;
2436 break;
2437 case BE_TX_COMP_NDMA_ERR:
2438 tx_stats(txo)->tx_dma_err++;
2439 break;
2440 case BE_TX_COMP_ACL_ERR:
2441 tx_stats(txo)->tx_spoof_check_err++;
2442 break;
2443 }
2444}
2445
2446static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2447{
2448 switch (status) {
2449 case LANCER_TX_COMP_LSO_ERR:
2450 tx_stats(txo)->tx_tso_err++;
2451 break;
2452 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2453 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2454 tx_stats(txo)->tx_spoof_check_err++;
2455 break;
2456 case LANCER_TX_COMP_QINQ_ERR:
2457 tx_stats(txo)->tx_qinq_err++;
2458 break;
2459 case LANCER_TX_COMP_PARITY_ERR:
2460 tx_stats(txo)->tx_internal_parity_err++;
2461 break;
2462 case LANCER_TX_COMP_DMA_ERR:
2463 tx_stats(txo)->tx_dma_err++;
2464 break;
2465 }
2466}
2467
/* Reap all pending TX completions for @txo (the TX queue at netdev subqueue
 * @idx): reclaim the WRBs and skbs, record per-status error stats, and wake
 * the subqueue if it was stopped for lack of WRBs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* Non-zero status indicates a TX error; the decode tables
		 * differ between Lancer and BEx/Skyhawk chips.
		 */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002506
/* NAPI poll handler for one event queue.  Processes TX completions for
 * all TX queues on this EQ, then RX (if the napi/busy-poll lock is won),
 * then MCC completions on the MCC EQ.  Returns the amount of RX work
 * done; when below @budget, NAPI is completed and the EQ is re-armed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* Count (and clear) the events that triggered this poll */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the RX queues; claim full budget so we
		 * get polled again instead of completing NAPI
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2546
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll entry point.  Tries each RX queue on this EQ
 * (budget of 4 frags per queue) and stops at the first queue that
 * yielded work.  Returns LL_FLUSH_BUSY if NAPI currently owns the EQ.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2568
/* Checks the adapter for unrecoverable hardware/firmware errors.
 * Lancer chips report errors via the SLIPORT doorbell registers; other
 * chips via the PCICFG UE (unrecoverable error) status words.  When an
 * error is found, the carrier is turned off so the stack stops using
 * the device.  A no-op if an error has already been latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Keep only the UE bits that are not masked off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE status bit set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2644
Sathya Perla8d56ff12009-11-22 22:02:26 +00002645static void be_msix_disable(struct be_adapter *adapter)
2646{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002647 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002648 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002649 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302650 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002651 }
2652}
2653
/* Enables MSI-x for the adapter.  Requests up to the computed number of
 * vectors (split between NIC and RoCE when RoCE is supported) and
 * records how many of each were granted.  Returns 0 on success; on a
 * VF, where INTx is not available, the pci_enable_msix_range() error is
 * propagated so probe fails; on a PF, 0 is returned so the caller can
 * fall back to INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, but never fewer than
	 * MIN_MSIX_VECTORS
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		/* Split the granted vectors evenly between NIC and RoCE */
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2697
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002698static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302699 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002700{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302701 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002702}
2703
/* Registers one MSI-x interrupt handler (be_msix) per event queue.
 * On failure the vectors registered so far are unwound and MSI-x is
 * disabled so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* IRQ name shown in /proc/interrupts, e.g. "eth0-q1" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Free only the vectors that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2727
/* Registers the adapter's interrupt handler(s): MSI-x when enabled,
 * otherwise a shared INTx line on the first event queue.  Sets
 * adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2755
2756static void be_irq_unregister(struct be_adapter *adapter)
2757{
2758 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002759 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002760 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002761
2762 if (!adapter->isr_registered)
2763 return;
2764
2765 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002766 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002767 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002768 goto done;
2769 }
2770
2771 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002772 for_all_evt_queues(adapter, eqo, i)
2773 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002774
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002775done:
2776 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777}
2778
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002779static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002780{
2781 struct be_queue_info *q;
2782 struct be_rx_obj *rxo;
2783 int i;
2784
2785 for_all_rx_queues(adapter, rxo, i) {
2786 q = &rxo->q;
2787 if (q->created) {
2788 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002789 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002790 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002791 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002792 }
2793}
2794
/* ndo_stop handler.  Quiesces the device in a strict order: RoCE close,
 * NAPI/busy-poll disable, async MCC off, TX drain, RX queue destroy,
 * extra unicast MAC removal, per-EQ IRQ sync + EQ clean, and finally
 * IRQ unregistration.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the additional unicast MACs; index 0 (the primary MAC)
	 * is kept
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no handler is running before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2844
/* Allocates and creates all RX queues, programs the RSS indirection
 * table and hash key when multiple RX queues exist, and posts the
 * initial receive buffers.  Returns 0 on success or a negative/FW
 * error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the RSS queues round-robin across the whole
		 * indirection table
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key so ethtool can report it */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
2910
/* ndo_open handler.  Creates the RX queues, registers interrupts, arms
 * all CQs/EQs, enables NAPI and busy-poll, reports link state, starts
 * the TX queues and opens the RoCE side.  On any failure be_close() is
 * invoked to undo the partial bring-up.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VxLAN ports for offload setup */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2960
/* Enables or disables magic-packet Wake-on-LAN.  Allocates a DMA
 * buffer for the FW command, programs the PCI PM control bits and the
 * magic-WoL MAC in FW, and sets the PCI wake capability accordingly.
 * Returns 0 on success or a negative error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* An all-zero MAC is passed to FW when disabling WoL */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3000
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips program the MAC as an extra pmac on the VF's
		 * interface; newer chips set it directly
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address in sequence */
		mac[5] += 1;
	}
	return status;
}
3036
/* Reads back from FW the MAC address currently active on each VF's
 * interface and caches it in the VF's config.  Returns 0 on success
 * or the first FW error encountered.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3053
/* Tears down SR-IOV: disables SR-IOV on the PCI device, removes each
 * VF's MAC and destroys its interface, then frees the VF config array.
 * If VFs are still assigned to VMs, only the bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips remove the extra pmac; newer chips clear
		 * the MAC directly
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3082
/* Destroys all queues created at setup time, in teardown order:
 * MCC queues, RX completion queues, TX queues, then event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3090
/* Cancels the periodic worker, waiting for a running instance to
 * finish, and clears the scheduled flag.  No-op if never scheduled.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
3098
Somnath Koturb05004a2013-12-05 12:08:16 +05303099static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303100{
3101 int i;
3102
Somnath Koturb05004a2013-12-05 12:08:16 +05303103 if (adapter->pmac_id) {
3104 for (i = 0; i < (adapter->uc_macs + 1); i++)
3105 be_cmd_pmac_del(adapter, adapter->if_handle,
3106 adapter->pmac_id[i], 0);
3107 adapter->uc_macs = 0;
3108
3109 kfree(adapter->pmac_id);
3110 adapter->pmac_id = NULL;
3111 }
3112}
3113
#ifdef CONFIG_BE2NET_VXLAN
/* Undoes VxLAN offload setup: converts the tunnel interface back to
 * normal mode, clears the programmed VxLAN port in FW, and resets the
 * driver's offload state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303128
/* Full teardown counterpart of be_setup(): stops the worker, clears
 * VFs, rebalances FW resources across max VFs, disables VxLAN
 * offloads, removes MACs, destroys the interface and all queues, and
 * disables MSI-x.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3157
/* Creates a FW interface for each VF.  On non-BE3 chips the capability
 * flags are refined from the VF's FW resource profile when available.
 * Returns 0 on success or the first FW error encountered.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	/* Baseline capabilities when no FW profile is available */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3189
Sathya Perla39f1d942012-05-08 19:41:24 +00003190static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003191{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003192 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003193 int vf;
3194
Sathya Perla39f1d942012-05-08 19:41:24 +00003195 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3196 GFP_KERNEL);
3197 if (!adapter->vf_cfg)
3198 return -ENOMEM;
3199
Sathya Perla11ac75e2011-12-13 00:58:50 +00003200 for_all_vfs(adapter, vf_cfg, vf) {
3201 vf_cfg->if_handle = -1;
3202 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003203 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003204 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003205}
3206
/* Bring up SR-IOV VFs. If VFs were already enabled (e.g. across a PF
 * driver reload), re-discover their interfaces and MACs; otherwise create
 * fresh interfaces, assign MACs and finally enable SR-IOV in PCI config
 * space. Returns 0 on success, negative/FW status on failure (after
 * tearing down whatever was set up).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	/* Non-zero when VFs survive from a previous PF driver instance */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VF interfaces already exist in FW; just query their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Only newly-created VFs need enabling and a default
		 * link-state policy; pre-existing ones keep theirs.
		 */
		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3281
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303282/* Converting function_mode bits on BE3 to SH mc_type enums */
3283
3284static u8 be_convert_mc_type(u32 function_mode)
3285{
Suresh Reddy66064db2014-06-23 16:41:29 +05303286 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303287 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303288 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303289 return FLEX10;
3290 else if (function_mode & VNIC_MODE)
3291 return vNIC2;
3292 else if (function_mode & UMC_ENABLED)
3293 return UMC;
3294 else
3295 return MC_NONE;
3296}
3297
/* On BE2/BE3 FW does not suggest the supported limits, so derive them
 * in the driver from the chip type, multi-channel mode, SR-IOV state and
 * function capabilities. Results go into *res; also records the
 * multi-channel type in adapter->mc_type as a side effect.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only on an RSS-capable, non-SRIOV physical function */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* one extra RX queue: the default (non-RSS) queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3365
Sathya Perla30128032011-11-10 19:17:57 +00003366static void be_setup_init(struct be_adapter *adapter)
3367{
3368 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003369 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003370 adapter->if_handle = -1;
3371 adapter->be3_native = false;
3372 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003373 if (be_physfn(adapter))
3374 adapter->cmd_privileges = MAX_PRIVILEGES;
3375 else
3376 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003377}
3378
/* Query the SR-IOV PF-pool resources from FW and decide how many VFs to
 * enable (adapter->num_vfs), reconciling the num_vfs module parameter
 * with the HW limit and with any VFs already enabled by a previous
 * driver instance. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI SR-IOV capability's TotalVFs */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs left enabled by a previous PF driver load win over
		 * the module parameter
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3423
/* Populate adapter->res with this function's resource limits:
 * hard-coded by the driver on BE2/BE3 (FW does not report them),
 * queried from FW on later chips. Logs the resulting maximums.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3460
Sathya Perlad3d18312014-08-01 17:47:30 +05303461static void be_sriov_config(struct be_adapter *adapter)
3462{
3463 struct device *dev = &adapter->pdev->dev;
3464 int status;
3465
3466 status = be_get_sriov_config(adapter);
3467 if (status) {
3468 dev_err(dev, "Failed to query SR-IOV configuration\n");
3469 dev_err(dev, "SR-IOV cannot be enabled\n");
3470 return;
3471 }
3472
3473 /* When the HW is in SRIOV capable configuration, the PF-pool
3474 * resources are equally distributed across the max-number of
3475 * VFs. The user may request only a subset of the max-vfs to be
3476 * enabled. Based on num_vfs, redistribute the resources across
3477 * num_vfs so that each VF will have access to more number of
3478 * resources. This facility is not available in BE3 FW.
3479 * Also, this is done by FW in Lancer chip.
3480 */
3481 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3482 status = be_cmd_set_sriov_config(adapter,
3483 adapter->pool_res,
3484 adapter->num_vfs);
3485 if (status)
3486 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3487 }
3488}
3489
/* Read the adapter configuration from FW: basic FW config, the active
 * profile (PF only), SR-IOV layout, and resource limits. Also allocates
 * the pmac_id table sized to the unicast-MAC limit and clamps the
 * configured queue count to the HW limits.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* BE2 has no SR-IOV profile support; only the PF configures SR-IOV */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3523
Sathya Perla95046b92013-07-23 15:25:02 +05303524static int be_mac_setup(struct be_adapter *adapter)
3525{
3526 u8 mac[ETH_ALEN];
3527 int status;
3528
3529 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3530 status = be_cmd_get_perm_mac(adapter, mac);
3531 if (status)
3532 return status;
3533
3534 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3535 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3536 } else {
3537 /* Maybe the HW was reset; dev_addr must be re-programmed */
3538 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3539 }
3540
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003541 /* For BE3-R VFs, the PF programs the initial MAC address */
3542 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3543 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303545 return 0;
3546}
3547
/* Schedule the periodic worker (1s delay) and record in the flags that
 * a work item is pending.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3553
Sathya Perla77071332013-08-27 16:57:34 +05303554static int be_setup_queues(struct be_adapter *adapter)
3555{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303556 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303557 int status;
3558
3559 status = be_evt_queues_create(adapter);
3560 if (status)
3561 goto err;
3562
3563 status = be_tx_qs_create(adapter);
3564 if (status)
3565 goto err;
3566
3567 status = be_rx_cqs_create(adapter);
3568 if (status)
3569 goto err;
3570
3571 status = be_mcc_queues_create(adapter);
3572 if (status)
3573 goto err;
3574
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303575 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3576 if (status)
3577 goto err;
3578
3579 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3580 if (status)
3581 goto err;
3582
Sathya Perla77071332013-08-27 16:57:34 +05303583 return 0;
3584err:
3585 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3586 return status;
3587}
3588
/* Tear down and re-create all queues (e.g. after a channel-count
 * change): close the netdev if running, stop the worker, drop MSI-X
 * (unless shared with RoCE), rebuild queues, restart the worker and
 * reopen the netdev. Returns 0 or the first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3624
/* Main adapter bring-up path: query FW config and resources, enable
 * MSI-X, create the interface and all queues, program the MAC, restore
 * VLAN/RX-mode/flow-control settings, set up VFs if requested and start
 * the periodic worker. On any failure everything is torn down via
 * be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable the basic filters, plus RSS when the function supports
	 * it, but never more than the interface's capability flags allow.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program VLAN filters that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Re-apply the driver's flow-control settings if FW differs */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3707
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: ring the doorbell of every event queue and kick
 * its NAPI handler, since normal interrupt delivery cannot be relied on
 * in netpoll context.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3723
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303724static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003725
Sathya Perla306f1342011-08-02 19:57:45 +00003726static bool phy_flashing_required(struct be_adapter *adapter)
3727{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003728 return (adapter->phy.phy_type == TN_8022 &&
3729 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003730}
3731
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003732static bool is_comp_in_ufi(struct be_adapter *adapter,
3733 struct flash_section_info *fsec, int type)
3734{
3735 int i = 0, img_type = 0;
3736 struct flash_section_info_g2 *fsec_g2 = NULL;
3737
Sathya Perlaca34fe32012-11-06 17:48:56 +00003738 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003739 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3740
3741 for (i = 0; i < MAX_FLASH_COMP; i++) {
3742 if (fsec_g2)
3743 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3744 else
3745 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3746
3747 if (img_type == type)
3748 return true;
3749 }
3750 return false;
3751
3752}
3753
Jingoo Han4188e7d2013-08-05 18:02:02 +09003754static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303755 int header_size,
3756 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003757{
3758 struct flash_section_info *fsec = NULL;
3759 const u8 *p = fw->data;
3760
3761 p += header_size;
3762 while (p < (fw->data + fw->size)) {
3763 fsec = (struct flash_section_info *)p;
3764 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3765 return fsec;
3766 p += 32;
3767 }
3768 return NULL;
3769}
3770
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303771static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3772 u32 img_offset, u32 img_size, int hdr_size,
3773 u16 img_optype, bool *crc_match)
3774{
3775 u32 crc_offset;
3776 int status;
3777 u8 crc[4];
3778
3779 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3780 if (status)
3781 return status;
3782
3783 crc_offset = hdr_size + img_offset + img_size - 4;
3784
3785 /* Skip flashing, if crc of flashed region matches */
3786 if (!memcmp(crc, p + crc_offset, 4))
3787 *crc_match = true;
3788 else
3789 *crc_match = false;
3790
3791 return status;
3792}
3793
/* Write one image section to flash in chunks of at most 32KB per FW
 * command. Intermediate chunks use the SAVE opcode; the final chunk is
 * sent with the FLASH opcode. For PHY firmware, an ILLEGAL_REQUEST
 * status is treated as "not applicable" and the flash is skipped
 * silently rather than failing.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk => FLASH op; earlier chunks => SAVE op */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3831
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003832/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003833static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303834 const struct firmware *fw,
3835 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003836{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003837 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303838 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003839 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303840 int status, i, filehdr_size, num_comp;
3841 const struct flash_comp *pflashcomp;
3842 bool crc_match;
3843 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003844
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003845 struct flash_comp gen3_flash_types[] = {
3846 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3847 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3848 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3849 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3850 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3851 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3852 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3853 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3854 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3855 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3856 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3857 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3858 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3859 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3860 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3861 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3862 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3863 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3864 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3865 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003866 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003867
3868 struct flash_comp gen2_flash_types[] = {
3869 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3870 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3871 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3872 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3873 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3874 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3875 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3876 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3877 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3878 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3879 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3880 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3881 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3882 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3883 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3884 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003885 };
3886
Sathya Perlaca34fe32012-11-06 17:48:56 +00003887 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003888 pflashcomp = gen3_flash_types;
3889 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003890 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003891 } else {
3892 pflashcomp = gen2_flash_types;
3893 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003894 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003895 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003896
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003897 /* Get flash section info*/
3898 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3899 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303900 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003901 return -1;
3902 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003903 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003904 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003905 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003906
3907 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3908 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3909 continue;
3910
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003911 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3912 !phy_flashing_required(adapter))
3913 continue;
3914
3915 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303916 status = be_check_flash_crc(adapter, fw->data,
3917 pflashcomp[i].offset,
3918 pflashcomp[i].size,
3919 filehdr_size +
3920 img_hdrs_size,
3921 OPTYPE_REDBOOT, &crc_match);
3922 if (status) {
3923 dev_err(dev,
3924 "Could not get CRC for 0x%x region\n",
3925 pflashcomp[i].optype);
3926 continue;
3927 }
3928
3929 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003930 continue;
3931 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003932
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303933 p = fw->data + filehdr_size + pflashcomp[i].offset +
3934 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003935 if (p + pflashcomp[i].size > fw->data + fw->size)
3936 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003937
3938 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303939 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003940 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303941 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003942 pflashcomp[i].img_type);
3943 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003944 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003945 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003946 return 0;
3947}
3948
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303949static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3950{
3951 u32 img_type = le32_to_cpu(fsec_entry.type);
3952 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3953
3954 if (img_optype != 0xFFFF)
3955 return img_optype;
3956
3957 switch (img_type) {
3958 case IMAGE_FIRMWARE_iSCSI:
3959 img_optype = OPTYPE_ISCSI_ACTIVE;
3960 break;
3961 case IMAGE_BOOT_CODE:
3962 img_optype = OPTYPE_REDBOOT;
3963 break;
3964 case IMAGE_OPTION_ROM_ISCSI:
3965 img_optype = OPTYPE_BIOS;
3966 break;
3967 case IMAGE_OPTION_ROM_PXE:
3968 img_optype = OPTYPE_PXE_BIOS;
3969 break;
3970 case IMAGE_OPTION_ROM_FCoE:
3971 img_optype = OPTYPE_FCOE_BIOS;
3972 break;
3973 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3974 img_optype = OPTYPE_ISCSI_BACKUP;
3975 break;
3976 case IMAGE_NCSI:
3977 img_optype = OPTYPE_NCSI_FW;
3978 break;
3979 case IMAGE_FLASHISM_JUMPVECTOR:
3980 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3981 break;
3982 case IMAGE_FIRMWARE_PHY:
3983 img_optype = OPTYPE_SH_PHY_FW;
3984 break;
3985 case IMAGE_REDBOOT_DIR:
3986 img_optype = OPTYPE_REDBOOT_DIR;
3987 break;
3988 case IMAGE_REDBOOT_CONFIG:
3989 img_optype = OPTYPE_REDBOOT_CONFIG;
3990 break;
3991 case IMAGE_UFI_DIR:
3992 img_optype = OPTYPE_UFI_DIR;
3993 break;
3994 default:
3995 break;
3996 }
3997
3998 return img_optype;
3999}
4000
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004001static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304002 const struct firmware *fw,
4003 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004004{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004005 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304006 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004007 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304008 u32 img_offset, img_size, img_type;
4009 int status, i, filehdr_size;
4010 bool crc_match, old_fw_img;
4011 u16 img_optype;
4012 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004013
4014 filehdr_size = sizeof(struct flash_file_hdr_g3);
4015 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4016 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304017 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304018 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004019 }
4020
4021 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4022 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4023 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304024 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4025 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4026 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004027
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304028 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004029 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304030 /* Don't bother verifying CRC if an old FW image is being
4031 * flashed
4032 */
4033 if (old_fw_img)
4034 goto flash;
4035
4036 status = be_check_flash_crc(adapter, fw->data, img_offset,
4037 img_size, filehdr_size +
4038 img_hdrs_size, img_optype,
4039 &crc_match);
4040 /* The current FW image on the card does not recognize the new
4041 * FLASH op_type. The FW download is partially complete.
4042 * Reboot the server now to enable FW image to recognize the
4043 * new FLASH op_type. To complete the remaining process,
4044 * download the same FW again after the reboot.
4045 */
Kalesh AP4c600052014-05-30 19:06:26 +05304046 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4047 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304048 dev_err(dev, "Flash incomplete. Reset the server\n");
4049 dev_err(dev, "Download FW image again after reset\n");
4050 return -EAGAIN;
4051 } else if (status) {
4052 dev_err(dev, "Could not get CRC for 0x%x region\n",
4053 img_optype);
4054 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004055 }
4056
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304057 if (crc_match)
4058 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004059
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304060flash:
4061 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004062 if (p + img_size > fw->data + fw->size)
4063 return -1;
4064
4065 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304066 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4067 * UFI_DIR region
4068 */
Kalesh AP4c600052014-05-30 19:06:26 +05304069 if (old_fw_img &&
4070 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4071 (img_optype == OPTYPE_UFI_DIR &&
4072 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304073 continue;
4074 } else if (status) {
4075 dev_err(dev, "Flashing section type 0x%x failed\n",
4076 img_type);
4077 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004078 }
4079 }
4080 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004081}
4082
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004083static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304084 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004085{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004086#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4087#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304088 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004089 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004090 const u8 *data_ptr = NULL;
4091 u8 *dest_image_ptr = NULL;
4092 size_t image_size = 0;
4093 u32 chunk_size = 0;
4094 u32 data_written = 0;
4095 u32 offset = 0;
4096 int status = 0;
4097 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004098 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004099
4100 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304101 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304102 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004103 }
4104
4105 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4106 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304107 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004108 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304109 if (!flash_cmd.va)
4110 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004111
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004112 dest_image_ptr = flash_cmd.va +
4113 sizeof(struct lancer_cmd_req_write_object);
4114 image_size = fw->size;
4115 data_ptr = fw->data;
4116
4117 while (image_size) {
4118 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4119
4120 /* Copy the image chunk content. */
4121 memcpy(dest_image_ptr, data_ptr, chunk_size);
4122
4123 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004124 chunk_size, offset,
4125 LANCER_FW_DOWNLOAD_LOCATION,
4126 &data_written, &change_status,
4127 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004128 if (status)
4129 break;
4130
4131 offset += data_written;
4132 data_ptr += data_written;
4133 image_size -= data_written;
4134 }
4135
4136 if (!status) {
4137 /* Commit the FW written */
4138 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004139 0, offset,
4140 LANCER_FW_DOWNLOAD_LOCATION,
4141 &data_written, &change_status,
4142 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004143 }
4144
Kalesh APbb864e02014-09-02 09:56:51 +05304145 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004146 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304147 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304148 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004149 }
4150
Kalesh APbb864e02014-09-02 09:56:51 +05304151 dev_info(dev, "Firmware flashed successfully\n");
4152
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004153 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304154 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004155 status = lancer_physdev_ctrl(adapter,
4156 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004157 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304158 dev_err(dev, "Adapter busy, could not reset FW\n");
4159 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004160 }
4161 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304162 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004163 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304164
4165 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004166}
4167
Sathya Perlaca34fe32012-11-06 17:48:56 +00004168#define UFI_TYPE2 2
4169#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004170#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004171#define UFI_TYPE4 4
4172static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004173 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004174{
Kalesh APddf11692014-07-17 16:20:28 +05304175 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004176 goto be_get_ufi_exit;
4177
Sathya Perlaca34fe32012-11-06 17:48:56 +00004178 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4179 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004180 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4181 if (fhdr->asic_type_rev == 0x10)
4182 return UFI_TYPE3R;
4183 else
4184 return UFI_TYPE3;
4185 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004186 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004187
4188be_get_ufi_exit:
4189 dev_err(&adapter->pdev->dev,
4190 "UFI and Interface are not compatible for flashing\n");
4191 return -1;
4192}
4193
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004194static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4195{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004196 struct flash_file_hdr_g3 *fhdr3;
4197 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004198 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004199 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004200 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004201
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004202 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004203 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4204 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00004205 if (!flash_cmd.va) {
4206 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004207 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004208 }
4209
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004210 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004211 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004212
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004213 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004214
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004215 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4216 for (i = 0; i < num_imgs; i++) {
4217 img_hdr_ptr = (struct image_hdr *)(fw->data +
4218 (sizeof(struct flash_file_hdr_g3) +
4219 i * sizeof(struct image_hdr)));
4220 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004221 switch (ufi_type) {
4222 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004223 status = be_flash_skyhawk(adapter, fw,
Sathya Perla748b5392014-05-09 13:29:13 +05304224 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004225 break;
4226 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00004227 status = be_flash_BEx(adapter, fw, &flash_cmd,
4228 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004229 break;
4230 case UFI_TYPE3:
4231 /* Do not flash this ufi on BE3-R cards */
4232 if (adapter->asic_rev < 0x10)
4233 status = be_flash_BEx(adapter, fw,
4234 &flash_cmd,
4235 num_imgs);
4236 else {
Kalesh AP56ace3a2014-07-17 16:20:20 +05304237 status = -EINVAL;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004238 dev_err(&adapter->pdev->dev,
4239 "Can't load BE3 UFI on BE3R\n");
4240 }
4241 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004242 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004243 }
4244
Sathya Perlaca34fe32012-11-06 17:48:56 +00004245 if (ufi_type == UFI_TYPE2)
4246 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004247 else if (ufi_type == -1)
Kalesh AP56ace3a2014-07-17 16:20:20 +05304248 status = -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004249
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004250 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4251 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004252 if (status) {
4253 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004254 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004255 }
4256
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004257 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004258
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004259be_fw_exit:
4260 return status;
4261}
4262
4263int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4264{
4265 const struct firmware *fw;
4266 int status;
4267
4268 if (!netif_running(adapter->netdev)) {
4269 dev_err(&adapter->pdev->dev,
4270 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304271 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004272 }
4273
4274 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4275 if (status)
4276 goto fw_exit;
4277
4278 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4279
4280 if (lancer_chip(adapter))
4281 status = lancer_fw_download(adapter, fw);
4282 else
4283 status = be_fw_download(adapter, fw);
4284
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004285 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304286 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004287
Ajit Khaparde84517482009-09-04 03:12:16 +00004288fw_exit:
4289 release_firmware(fw);
4290 return status;
4291}
4292
Sathya Perla748b5392014-05-09 13:29:13 +05304293static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004294{
4295 struct be_adapter *adapter = netdev_priv(dev);
4296 struct nlattr *attr, *br_spec;
4297 int rem;
4298 int status = 0;
4299 u16 mode = 0;
4300
4301 if (!sriov_enabled(adapter))
4302 return -EOPNOTSUPP;
4303
4304 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4305
4306 nla_for_each_nested(attr, br_spec, rem) {
4307 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4308 continue;
4309
4310 mode = nla_get_u16(attr);
4311 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4312 return -EINVAL;
4313
4314 status = be_cmd_set_hsw_config(adapter, 0, 0,
4315 adapter->if_handle,
4316 mode == BRIDGE_MODE_VEPA ?
4317 PORT_FWD_TYPE_VEPA :
4318 PORT_FWD_TYPE_VEB);
4319 if (status)
4320 goto err;
4321
4322 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4323 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4324
4325 return status;
4326 }
4327err:
4328 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4329 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4330
4331 return status;
4332}
4333
4334static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304335 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004336{
4337 struct be_adapter *adapter = netdev_priv(dev);
4338 int status = 0;
4339 u8 hsw_mode;
4340
4341 if (!sriov_enabled(adapter))
4342 return 0;
4343
4344 /* BE and Lancer chips support VEB mode only */
4345 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4346 hsw_mode = PORT_FWD_TYPE_VEB;
4347 } else {
4348 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4349 adapter->if_handle, &hsw_mode);
4350 if (status)
4351 return 0;
4352 }
4353
4354 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4355 hsw_mode == PORT_FWD_TYPE_VEPA ?
4356 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4357}
4358
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304359#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304360static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4361 __be16 port)
4362{
4363 struct be_adapter *adapter = netdev_priv(netdev);
4364 struct device *dev = &adapter->pdev->dev;
4365 int status;
4366
4367 if (lancer_chip(adapter) || BEx_chip(adapter))
4368 return;
4369
4370 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4371 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4372 be16_to_cpu(port));
4373 dev_info(dev,
4374 "Only one UDP port supported for VxLAN offloads\n");
4375 return;
4376 }
4377
4378 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4379 OP_CONVERT_NORMAL_TO_TUNNEL);
4380 if (status) {
4381 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4382 goto err;
4383 }
4384
4385 status = be_cmd_set_vxlan_port(adapter, port);
4386 if (status) {
4387 dev_warn(dev, "Failed to add VxLAN port\n");
4388 goto err;
4389 }
4390 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4391 adapter->vxlan_port = port;
4392
4393 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4394 be16_to_cpu(port));
4395 return;
4396err:
4397 be_disable_vxlan_offloads(adapter);
4398 return;
4399}
4400
4401static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4402 __be16 port)
4403{
4404 struct be_adapter *adapter = netdev_priv(netdev);
4405
4406 if (lancer_chip(adapter) || BEx_chip(adapter))
4407 return;
4408
4409 if (adapter->vxlan_port != port)
4410 return;
4411
4412 be_disable_vxlan_offloads(adapter);
4413
4414 dev_info(&adapter->pdev->dev,
4415 "Disabled VxLAN offloads for UDP port %d\n",
4416 be16_to_cpu(port));
4417}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304418#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304419
stephen hemmingere5686ad2012-01-05 19:10:25 +00004420static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004421 .ndo_open = be_open,
4422 .ndo_stop = be_close,
4423 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004424 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004425 .ndo_set_mac_address = be_mac_addr_set,
4426 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004427 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004428 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004429 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4430 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004431 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004432 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004433 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004434 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304435 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004436#ifdef CONFIG_NET_POLL_CONTROLLER
4437 .ndo_poll_controller = be_netpoll,
4438#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004439 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4440 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304441#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304442 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304443#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304444#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304445 .ndo_add_vxlan_port = be_add_vxlan_port,
4446 .ndo_del_vxlan_port = be_del_vxlan_port,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304447#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004448};
4449
4450static void be_netdev_init(struct net_device *netdev)
4451{
4452 struct be_adapter *adapter = netdev_priv(netdev);
4453
Sathya Perlac9c47142014-03-27 10:46:19 +05304454 if (skyhawk_chip(adapter)) {
4455 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4456 NETIF_F_TSO | NETIF_F_TSO6 |
4457 NETIF_F_GSO_UDP_TUNNEL;
4458 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4459 }
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004460 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004461 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004462 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004463 if (be_multi_rxq(adapter))
4464 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004465
4466 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004467 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004468
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004469 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004470 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004471
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004472 netdev->priv_flags |= IFF_UNICAST_FLT;
4473
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004474 netdev->flags |= IFF_MULTICAST;
4475
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004476 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004477
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004478 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004479
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004480 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004481}
4482
4483static void be_unmap_pci_bars(struct be_adapter *adapter)
4484{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004485 if (adapter->csr)
4486 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004487 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004488 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004489}
4490
Sathya Perlace66f782012-11-06 17:48:58 +00004491static int db_bar(struct be_adapter *adapter)
4492{
4493 if (lancer_chip(adapter) || !be_physfn(adapter))
4494 return 0;
4495 else
4496 return 4;
4497}
4498
4499static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004500{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004501 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004502 adapter->roce_db.size = 4096;
4503 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4504 db_bar(adapter));
4505 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4506 db_bar(adapter));
4507 }
Parav Pandit045508a2012-03-26 14:27:13 +00004508 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004509}
4510
4511static int be_map_pci_bars(struct be_adapter *adapter)
4512{
4513 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004514
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004515 if (BEx_chip(adapter) && be_physfn(adapter)) {
4516 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304517 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004518 return -ENOMEM;
4519 }
4520
Sathya Perlace66f782012-11-06 17:48:58 +00004521 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304522 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004523 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004524 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004525
4526 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004527 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004528
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004529pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304530 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004531 be_unmap_pci_bars(adapter);
4532 return -ENOMEM;
4533}
4534
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004535static void be_ctrl_cleanup(struct be_adapter *adapter)
4536{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004537 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004538
4539 be_unmap_pci_bars(adapter);
4540
4541 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004542 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4543 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004544
Sathya Perla5b8821b2011-08-02 19:57:44 +00004545 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004546 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004547 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4548 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004549}
4550
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004551static int be_ctrl_init(struct be_adapter *adapter)
4552{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004553 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4554 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004555 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004556 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004557 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004558
Sathya Perlace66f782012-11-06 17:48:58 +00004559 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4560 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4561 SLI_INTF_FAMILY_SHIFT;
4562 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4563
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004564 status = be_map_pci_bars(adapter);
4565 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004566 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004567
4568 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004569 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4570 mbox_mem_alloc->size,
4571 &mbox_mem_alloc->dma,
4572 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004573 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004574 status = -ENOMEM;
4575 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004576 }
4577 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4578 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4579 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4580 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004581
Sathya Perla5b8821b2011-08-02 19:57:44 +00004582 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa2013-08-26 22:45:23 -07004583 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4584 rx_filter->size, &rx_filter->dma,
4585 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304586 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004587 status = -ENOMEM;
4588 goto free_mbox;
4589 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004590
Ivan Vecera29849612010-12-14 05:43:19 +00004591 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004592 spin_lock_init(&adapter->mcc_lock);
4593 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004594
Suresh Reddy5eeff632014-01-06 13:02:24 +05304595 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004596 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004597 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004598
4599free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004600 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4601 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004602
4603unmap_pci_bars:
4604 be_unmap_pci_bars(adapter);
4605
4606done:
4607 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004608}
4609
4610static void be_stats_cleanup(struct be_adapter *adapter)
4611{
Sathya Perla3abcded2010-10-03 22:12:27 -07004612 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004613
4614 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004615 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4616 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004617}
4618
4619static int be_stats_init(struct be_adapter *adapter)
4620{
Sathya Perla3abcded2010-10-03 22:12:27 -07004621 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004622
Sathya Perlaca34fe32012-11-06 17:48:56 +00004623 if (lancer_chip(adapter))
4624 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4625 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004626 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004627 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004628 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004629 else
4630 /* ALL non-BE ASICs */
4631 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004632
Joe Perchesede23fa2013-08-26 22:45:23 -07004633 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4634 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304635 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304636 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004637 return 0;
4638}
4639
/* PCI remove callback: tear down the adapter in the reverse order of probe.
 * The ordering below is deliberate — RoCE and interrupts are quiesced
 * before the netdev is unregistered, and the control path is the last
 * driver-owned resource released before the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4670
Sathya Perla39f1d942012-05-08 19:41:24 +00004671static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004672{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304673 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004674
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004675 status = be_cmd_get_cntl_attributes(adapter);
4676 if (status)
4677 return status;
4678
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004679 /* Must be a power of 2 or else MODULO will BUG_ON */
4680 adapter->be_get_temp_freq = 64;
4681
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304682 if (BEx_chip(adapter)) {
4683 level = be_cmd_get_fw_log_level(adapter);
4684 adapter->msg_enable =
4685 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4686 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004687
Sathya Perla92bf14a2013-08-27 16:57:32 +05304688 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004689 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004690}
4691
/* Attempt to recover a Lancer chip after a HW/FW error: wait for the
 * chip to report ready, tear the function down, clear the recorded error
 * state, and bring it back up. Returns 0 on success; -EAGAIN indicates
 * the FW is still provisioning resources and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* forget the previous error so be_detect_error() starts fresh */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	/* reopen only if the interface was up before the error */
	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4728
/* Periodic (1s) work item that polls for adapter errors and, on Lancer
 * chips, drives the recovery sequence. The netdev is detached under
 * rtnl_lock before recovery and re-attached only if recovery succeeds.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4755
/* Periodic (1s) housekeeping work: reaps MCC completions while the
 * interface is down, issues the async stats request, polls die
 * temperature on the PF, replenishes starved RX queues and updates
 * EQ delay (interrupt moderation). Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new stats request only if the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* die-temperature poll: PF only, every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4798
Sathya Perla257a3fe2013-06-14 15:54:51 +05304799/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004800static bool be_reset_required(struct be_adapter *adapter)
4801{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304802 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004803}
4804
Sathya Perlad3791422012-09-28 04:39:44 +00004805static char *mc_name(struct be_adapter *adapter)
4806{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304807 char *str = ""; /* default */
4808
4809 switch (adapter->mc_type) {
4810 case UMC:
4811 str = "UMC";
4812 break;
4813 case FLEX10:
4814 str = "FLEX10";
4815 break;
4816 case vNIC1:
4817 str = "vNIC-1";
4818 break;
4819 case nPAR:
4820 str = "nPAR";
4821 break;
4822 case UFP:
4823 str = "UFP";
4824 break;
4825 case vNIC2:
4826 str = "vNIC-2";
4827 break;
4828 default:
4829 str = "";
4830 }
4831
4832 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004833}
4834
/* Label this PCI function as physical ("PF") or virtual ("VF") for logs. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4839
/* PCI probe callback: bring up one BE/Lancer NIC function.
 * Sequence: enable the PCI device, allocate the netdev, set the DMA
 * mask, init the control path, sync with FW, optionally FLR, init
 * stats and config, set up queues/filters and register the netdev.
 * Errors unwind through the goto ladder in reverse acquisition order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* start the 1s error-detection/recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4963
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce
 * interrupts and the recovery worker, close and tear down the function,
 * then put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl, as in the ndo path */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4988
/* Legacy PM resume callback: re-enable the PCI device, wait for FW,
 * rebuild the function with be_setup(), reopen the interface if it was
 * running, restart the recovery poller and disarm wake-on-LAN.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are ignored
	 * here; the device is re-attached regardless */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5030
/*
 * An FLR will stop BE from DMAing any data.
 * Shutdown callback: stop workers, detach the netdev and reset the
 * function so the HW quiesces before reboot/poweroff.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* FLR stops any in-flight DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5051
/* EEH error_detected callback: quiesce the function on the first error
 * report (the eeh_error flag makes re-entry idempotent) and tell the
 * PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	/* link is permanently dead: no point requesting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5090
/* EEH slot_reset callback: re-enable the PCI device after the slot
 * reset, wait for FW readiness and clear the recorded error state.
 * Returns RECOVERED so the core proceeds to be_eeh_resume(), or
 * DISCONNECT if the device/FW cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5117
/* EEH resume callback: rebuild the function after a successful slot
 * reset — FLR, re-enable interrupts, FW init, be_setup(), reopen the
 * interface if it was running, then restart the recovery poller.
 * On any failure the netdev stays detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5160
/* PCI error (EEH/AER) recovery callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5166
/* PCI driver descriptor: binds be_dev_ids to the probe/remove, legacy
 * suspend/resume, shutdown and EEH error-recovery entry points above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5177
5178static int __init be_init_module(void)
5179{
Joe Perches8e95a202009-12-03 07:58:21 +00005180 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5181 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005182 printk(KERN_WARNING DRV_NAME
5183 " : Module param rx_frag_size must be 2048/4096/8192."
5184 " Using 2048\n");
5185 rx_frag_size = 2048;
5186 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005187
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005188 return pci_register_driver(&be_driver);
5189}
5190module_init(be_init_module);
5191
/* Module exit point: unregister the PCI driver; the core then invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);