blob: 2cd733b5de73c0d3edfe122c60f4d8bbf05034a1 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Sathya Perla6b7c5b92009-03-11 23:32:03 -070042static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE (Unrecoverable Error) Status Low CSR: name of the HW block that
 * corresponds to each bit position in the register, used when decoding
 * and logging a UE. Some entries carry a trailing space as emitted by
 * the original table — preserved intentionally since they are printed
 * verbatim.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE (Unrecoverable Error) Status High CSR: one block name per bit
 * position; "Unknown" entries are reserved/undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* MAC address change handler (net_device_ops-style signature — presumably
 * wired to .ndo_set_mac_address; confirm against the ops table elsewhere
 * in this file). Programs the new MAC via FW commands and updates
 * netdev->dev_addr only after the FW confirms the new MAC is active.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0 (BE2) GET_STATS response fields into the driver's
 * chip-independent drv_stats structure. The response is byte-swapped
 * in place from LE first; note v0 lacks some per-port counters (e.g.
 * jabber events are per-RXF here, selected by port number).
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole response from little-endian in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; fold both */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 jabber counters are per-RXF, one per port */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) GET_STATS response fields into the driver's
 * chip-independent drv_stats structure. The response is byte-swapped
 * in place from LE first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole response from little-endian in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 GET_STATS response fields into the driver's
 * chip-independent drv_stats structure. Same mapping as v1 plus the
 * RoCE counters, which are copied only when RoCE is supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole response from little-endian in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer per-port (pport) stats into the driver's
 * chip-independent drv_stats structure. Lancer reports 64-bit counters;
 * only the low 32 bits (_lo fields) are folded into drv_stats here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* convert the whole response from little-endian in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filtered drops separately */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
/* 64-bit netdev stats handler (ndo_get_stats64-style signature).
 * Sums per-queue RX/TX packet and byte counters (read consistently via
 * the u64_stats seqcount) and derives the rtnl error counters from the
 * chip-independent drv_stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if the writer updated concurrently */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry loop: re-read if the writer updated concurrently */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
Somnath Koturcc4ce022010-10-21 07:11:14 -0700733static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Sathya Perla748b5392014-05-09 13:29:13 +0530734 struct sk_buff *skb, u32 wrb_cnt, u32 len,
735 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736{
Sathya Perlac9c47142014-03-27 10:46:19 +0530737 u16 vlan_tag, proto;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700738
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700739 memset(hdr, 0, sizeof(*hdr));
740
741 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
742
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000743 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
745 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
746 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000747 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000748 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700749 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530750 if (skb->encapsulation) {
751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
752 proto = skb_inner_ip_proto(skb);
753 } else {
754 proto = skb_ip_proto(skb);
755 }
756 if (proto == IPPROTO_TCP)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700757 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530758 else if (proto == IPPROTO_UDP)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700759 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
760 }
761
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700762 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000764 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700765 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700766 }
767
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000768 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
769 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
772 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
773}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
Sathya Perla3c8def92011-06-12 20:01:58 +0000792static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla748b5392014-05-09 13:29:13 +0530793 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
794 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700795{
Sathya Perla7101e112010-03-22 20:41:12 +0000796 dma_addr_t busaddr;
797 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000798 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700799 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700800 struct be_eth_wrb *wrb;
801 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000802 bool map_single = false;
803 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700804
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700805 hdr = queue_head_node(txq);
806 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000807 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700808
David S. Millerebc8d2a2009-06-09 01:01:31 -0700809 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700810 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000811 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
812 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000813 goto dma_err;
814 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700815 wrb = queue_head_node(txq);
816 wrb_fill(wrb, busaddr, len);
817 be_dws_cpu_to_le(wrb, sizeof(*wrb));
818 queue_head_inc(txq);
819 copied += len;
820 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821
David S. Millerebc8d2a2009-06-09 01:01:31 -0700822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Sathya Perla748b5392014-05-09 13:29:13 +0530823 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000824 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000825 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000826 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000827 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700828 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000829 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700830 be_dws_cpu_to_le(wrb, sizeof(*wrb));
831 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000832 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833 }
834
835 if (dummy_wrb) {
836 wrb = queue_head_node(txq);
837 wrb_fill(wrb, 0, 0);
838 be_dws_cpu_to_le(wrb, sizeof(*wrb));
839 queue_head_inc(txq);
840 }
841
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000842 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843 be_dws_cpu_to_le(hdr, sizeof(*hdr));
844
845 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000846dma_err:
847 txq->head = map_head;
848 while (copied) {
849 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000850 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000851 map_single = false;
852 copied -= wrb->frag_len;
853 queue_head_inc(txq);
854 }
855 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700856}
857
Somnath Kotur93040ae2012-06-26 22:32:10 +0000858static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000859 struct sk_buff *skb,
860 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000861{
862 u16 vlan_tag = 0;
863
864 skb = skb_share_check(skb, GFP_ATOMIC);
865 if (unlikely(!skb))
866 return skb;
867
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000868 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000869 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530870
871 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
872 if (!vlan_tag)
873 vlan_tag = adapter->pvid;
874 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
875 * skip VLAN insertion
876 */
877 if (skip_hw_vlan)
878 *skip_hw_vlan = true;
879 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000880
881 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400882 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883 if (unlikely(!skb))
884 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000885 skb->vlan_tci = 0;
886 }
887
888 /* Insert the outer VLAN, if any */
889 if (adapter->qnq_vid) {
890 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400891 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000892 if (unlikely(!skb))
893 return skb;
894 if (skip_hw_vlan)
895 *skip_hw_vlan = true;
896 }
897
Somnath Kotur93040ae2012-06-26 22:32:10 +0000898 return skb;
899}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
Vasundhara Volamec495fa2014-03-03 14:25:38 +0530933static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
934 struct sk_buff *skb,
935 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700936{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000937 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000938 unsigned int eth_hdr_len;
939 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000940
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000941 /* For padded packets, BE HW modifies tot_len field in IP header
942 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000943 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000944 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000945 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
946 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000947 if (skb->len <= 60 &&
948 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000949 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000950 ip = (struct iphdr *)ip_hdr(skb);
951 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
952 }
953
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000954 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530955 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000956 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530957 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000958 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perla748b5392014-05-09 13:29:13 +0530959 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000960
Somnath Kotur93040ae2012-06-26 22:32:10 +0000961 /* HW has a bug wherein it will calculate CSUM for VLAN
962 * pkts even though it is disabled.
963 * Manually insert VLAN in pkt.
964 */
965 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000966 vlan_tx_tag_present(skb)) {
967 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000968 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +0530969 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000970 }
971
972 /* HW may lockup when VLAN HW tagging is requested on
973 * certain ipv6 packets. Drop such pkts if the HW workaround to
974 * skip HW tagging is not enabled by FW.
975 */
976 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000977 (adapter->pvid || adapter->qnq_vid) &&
978 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000979 goto tx_drop;
980
981 /* Manual VLAN tag insertion to prevent:
982 * ASIC lockup when the ASIC inserts VLAN tag into
983 * certain ipv6 packets. Insert VLAN tags in driver,
984 * and set event, completion, vlan bits accordingly
985 * in the Tx WRB.
986 */
987 if (be_ipv6_tx_stall_chk(adapter, skb) &&
988 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000989 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000990 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +0530991 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000992 }
993
Sathya Perlaee9c7992013-05-22 23:04:55 +0000994 return skb;
995tx_drop:
996 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +0530997err:
Sathya Perlaee9c7992013-05-22 23:04:55 +0000998 return NULL;
999}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
Sathya Perlaee9c7992013-05-22 23:04:55 +00001024static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1025{
1026 struct be_adapter *adapter = netdev_priv(netdev);
1027 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1028 struct be_queue_info *txq = &txo->q;
1029 bool dummy_wrb, stopped = false;
1030 u32 wrb_cnt = 0, copied = 0;
1031 bool skip_hw_vlan = false;
1032 u32 start = txq->head;
1033
1034 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
Sathya Perlabc617522013-10-01 16:00:01 +05301035 if (!skb) {
1036 tx_stats(txo)->tx_drv_drops++;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001037 return NETDEV_TX_OK;
Sathya Perlabc617522013-10-01 16:00:01 +05301038 }
Sathya Perlaee9c7992013-05-22 23:04:55 +00001039
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001040 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001041
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001042 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1043 skip_hw_vlan);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001044 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001045 int gso_segs = skb_shinfo(skb)->gso_segs;
1046
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001047 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +00001048 BUG_ON(txo->sent_skb_list[start]);
1049 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001050
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001051 /* Ensure txq has space for the next skb; Else stop the queue
1052 * *BEFORE* ringing the tx doorbell, so that we serialze the
1053 * tx compls of the current transmit which'll wake up the queue
1054 */
Sathya Perla7101e112010-03-22 20:41:12 +00001055 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001056 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1057 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +00001058 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001059 stopped = true;
1060 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001061
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001062 be_txq_notify(adapter, txo, wrb_cnt);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001063
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001064 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001065 } else {
1066 txq->head = start;
Sathya Perlabc617522013-10-01 16:00:01 +05301067 tx_stats(txo)->tx_drv_drops++;
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001068 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001069 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001070 return NETDEV_TX_OK;
1071}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
1090/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001091 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1092 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093 */
Sathya Perla10329df2012-06-05 19:37:18 +00001094static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001095{
Sathya Perla10329df2012-06-05 19:37:18 +00001096 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301097 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001098 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001099
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001100 /* No need to further configure vids if in promiscuous mode */
1101 if (adapter->promiscuous)
1102 return 0;
1103
Sathya Perla92bf14a2013-08-27 16:57:32 +05301104 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001105 goto set_vlan_promisc;
1106
1107 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301108 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1109 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001110
Sathya Perla748b5392014-05-09 13:29:13 +05301111 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001112
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001113 if (status) {
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001114 /* Set to VLAN promisc mode as setting VLAN filter failed */
1115 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1116 goto set_vlan_promisc;
1117 dev_err(&adapter->pdev->dev,
1118 "Setting HW VLAN filtering failed.\n");
1119 } else {
1120 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1121 /* hw VLAN filtering re-enabled. */
1122 status = be_cmd_rx_filter(adapter,
1123 BE_FLAGS_VLAN_PROMISC, OFF);
1124 if (!status) {
1125 dev_info(&adapter->pdev->dev,
1126 "Disabling VLAN Promiscuous mode.\n");
1127 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001128 }
1129 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001130 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001131
Sathya Perlab31c50a2009-09-17 10:30:13 -07001132 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001133
1134set_vlan_promisc:
Somnath Kotura6b74e02014-01-21 15:50:55 +05301135 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1136 return 0;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001137
1138 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1139 if (!status) {
1140 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001141 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1142 } else
1143 dev_err(&adapter->pdev->dev,
1144 "Failed to enable VLAN Promiscuous mode.\n");
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001145 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146}
1147
Patrick McHardy80d5c362013-04-19 02:04:28 +00001148static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001151 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001153 /* Packets with VID 0 are always received by Lancer by default */
1154 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301155 return status;
1156
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301157 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301158 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001159
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301160 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301161 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001162
Somnath Kotura6b74e02014-01-21 15:50:55 +05301163 status = be_vid_config(adapter);
1164 if (status) {
1165 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301166 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301167 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301168
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001169 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170}
1171
Patrick McHardy80d5c362013-04-19 02:04:28 +00001172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001175 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001177 /* Packets with VID 0 are always received by Lancer by default */
1178 if (lancer_chip(adapter) && vid == 0)
1179 goto ret;
1180
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301181 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301182 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001183 if (!status)
1184 adapter->vlans_added--;
1185 else
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301186 set_bit(vid, adapter->vids);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001187ret:
1188 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189}
1190
Somnath kotur7ad09452014-03-03 14:24:43 +05301191static void be_clear_promisc(struct be_adapter *adapter)
1192{
1193 adapter->promiscuous = false;
1194 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1195
1196 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1197}
1198
Sathya Perlaa54769f2011-10-24 02:45:00 +00001199static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200{
1201 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001202 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001203
1204 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001205 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001206 adapter->promiscuous = true;
1207 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001209
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001210 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001211 if (adapter->promiscuous) {
Somnath kotur7ad09452014-03-03 14:24:43 +05301212 be_clear_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001213 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001214 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001215 }
1216
Sathya Perlae7b909a2009-11-22 22:01:10 +00001217 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001218 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla92bf14a2013-08-27 16:57:32 +05301219 netdev_mc_count(netdev) > be_max_mc(adapter)) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001220 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001221 goto done;
1222 }
1223
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001224 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1225 struct netdev_hw_addr *ha;
1226 int i = 1; /* First slot is claimed by the Primary MAC */
1227
1228 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1229 be_cmd_pmac_del(adapter, adapter->if_handle,
1230 adapter->pmac_id[i], 0);
1231 }
1232
Sathya Perla92bf14a2013-08-27 16:57:32 +05301233 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001234 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1235 adapter->promiscuous = true;
1236 goto done;
1237 }
1238
1239 netdev_for_each_uc_addr(ha, adapter->netdev) {
1240 adapter->uc_macs++; /* First slot is for Primary MAC */
1241 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1242 adapter->if_handle,
1243 &adapter->pmac_id[adapter->uc_macs], 0);
1244 }
1245 }
1246
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001247 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1248
1249 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1250 if (status) {
Sathya Perla748b5392014-05-09 13:29:13 +05301251 dev_info(&adapter->pdev->dev,
1252 "Exhausted multicast HW filters.\n");
1253 dev_info(&adapter->pdev->dev,
1254 "Disabling HW multicast filtering.\n");
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001255 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1256 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001257done:
1258 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001259}
1260
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001261static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1262{
1263 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001264 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001265 int status;
1266
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001268 return -EPERM;
1269
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001271 return -EINVAL;
1272
Sathya Perla3175d8c2013-07-23 15:25:03 +05301273 if (BEx_chip(adapter)) {
1274 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1275 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001276
Sathya Perla11ac75e2011-12-13 00:58:50 +00001277 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1278 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301279 } else {
1280 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1281 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001282 }
1283
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001284 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001285 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301286 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001287 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001288 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001289
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001290 return status;
1291}
1292
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001293static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301294 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001295{
1296 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001297 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001298
Sathya Perla11ac75e2011-12-13 00:58:50 +00001299 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001300 return -EPERM;
1301
Sathya Perla11ac75e2011-12-13 00:58:50 +00001302 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001303 return -EINVAL;
1304
1305 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001306 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001307 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1308 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001309 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301310 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001311
1312 return 0;
1313}
1314
Sathya Perla748b5392014-05-09 13:29:13 +05301315static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001316{
1317 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001318 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001319 int status = 0;
1320
Sathya Perla11ac75e2011-12-13 00:58:50 +00001321 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001322 return -EPERM;
1323
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001324 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001325 return -EINVAL;
1326
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001327 if (vlan || qos) {
1328 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301329 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001330 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1331 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001332 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001333 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301334 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1335 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001336 }
1337
Somnath Koturc5022242014-03-03 14:24:20 +05301338 if (!status)
1339 vf_cfg->vlan_tag = vlan;
1340 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001341 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301342 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001343 return status;
1344}
1345
Sathya Perla748b5392014-05-09 13:29:13 +05301346static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001347{
1348 struct be_adapter *adapter = netdev_priv(netdev);
1349 int status = 0;
1350
Sathya Perla11ac75e2011-12-13 00:58:50 +00001351 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001352 return -EPERM;
1353
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001354 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001355 return -EINVAL;
1356
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001357 if (rate < 100 || rate > 10000) {
1358 dev_err(&adapter->pdev->dev,
1359 "tx rate must be between 100 and 10000 Mbps\n");
1360 return -EINVAL;
1361 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001362
Sathya Perlaa4018012014-03-27 10:46:18 +05301363 status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001364 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001365 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301366 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001367 else
1368 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001369 return status;
1370}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301371static int be_set_vf_link_state(struct net_device *netdev, int vf,
1372 int link_state)
1373{
1374 struct be_adapter *adapter = netdev_priv(netdev);
1375 int status;
1376
1377 if (!sriov_enabled(adapter))
1378 return -EPERM;
1379
1380 if (vf >= adapter->num_vfs)
1381 return -EINVAL;
1382
1383 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1384 if (!status)
1385 adapter->vf_cfg[vf].plink_tracking = link_state;
1386
1387 return status;
1388}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001389
Sathya Perla2632baf2013-10-01 16:00:00 +05301390static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1391 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392{
Sathya Perla2632baf2013-10-01 16:00:00 +05301393 aic->rx_pkts_prev = rx_pkts;
1394 aic->tx_reqs_prev = tx_pkts;
1395 aic->jiffies = now;
1396}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001397
Sathya Perla2632baf2013-10-01 16:00:00 +05301398static void be_eqd_update(struct be_adapter *adapter)
1399{
1400 struct be_set_eqd set_eqd[MAX_EVT_QS];
1401 int eqd, i, num = 0, start;
1402 struct be_aic_obj *aic;
1403 struct be_eq_obj *eqo;
1404 struct be_rx_obj *rxo;
1405 struct be_tx_obj *txo;
1406 u64 rx_pkts, tx_pkts;
1407 ulong now;
1408 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001409
Sathya Perla2632baf2013-10-01 16:00:00 +05301410 for_all_evt_queues(adapter, eqo, i) {
1411 aic = &adapter->aic_obj[eqo->idx];
1412 if (!aic->enable) {
1413 if (aic->jiffies)
1414 aic->jiffies = 0;
1415 eqd = aic->et_eqd;
1416 goto modify_eqd;
1417 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001418
Sathya Perla2632baf2013-10-01 16:00:00 +05301419 rxo = &adapter->rx_obj[eqo->idx];
1420 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001421 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301422 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001423 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001424
Sathya Perla2632baf2013-10-01 16:00:00 +05301425 txo = &adapter->tx_obj[eqo->idx];
1426 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001427 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301428 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001429 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001430
Sathya Perla4097f662009-03-24 16:40:13 -07001431
Sathya Perla2632baf2013-10-01 16:00:00 +05301432 /* Skip, if wrapped around or first calculation */
1433 now = jiffies;
1434 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1435 rx_pkts < aic->rx_pkts_prev ||
1436 tx_pkts < aic->tx_reqs_prev) {
1437 be_aic_update(aic, rx_pkts, tx_pkts, now);
1438 continue;
1439 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001440
Sathya Perla2632baf2013-10-01 16:00:00 +05301441 delta = jiffies_to_msecs(now - aic->jiffies);
1442 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1443 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1444 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001445
Sathya Perla2632baf2013-10-01 16:00:00 +05301446 if (eqd < 8)
1447 eqd = 0;
1448 eqd = min_t(u32, eqd, aic->max_eqd);
1449 eqd = max_t(u32, eqd, aic->min_eqd);
1450
1451 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001452modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301453 if (eqd != aic->prev_eqd) {
1454 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1455 set_eqd[num].eq_id = eqo->q.id;
1456 aic->prev_eqd = eqd;
1457 num++;
1458 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001459 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301460
1461 if (num)
1462 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001463}
1464
Sathya Perla3abcded2010-10-03 22:12:27 -07001465static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301466 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001467{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001468 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001469
Sathya Perlaab1594e2011-07-25 19:10:15 +00001470 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001471 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001472 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001473 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001474 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001475 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001476 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001477 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001478 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479}
1480
Sathya Perla2e588f82011-03-11 02:49:26 +00001481static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001482{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001483 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301484 * Also ignore ipcksm for ipv6 pkts
1485 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001486 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301487 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001488}
1489
/* Pop the page_info entry at the Rx queue tail and make its data visible
 * to the CPU. A big page is DMA-mapped once and carved into rx_frag_size
 * fragments (see be_post_rx_frags()): only the fragment flagged last_frag
 * unmaps the whole page; earlier fragments just sync their own slice out
 * of the DMA domain.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* a posted buffer must always have a page attached */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* last fragment of the page: undo the full mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* page still in use by later frags: sync just this slice */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* consume the slot */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1515
1516/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001517static void be_rx_compl_discard(struct be_rx_obj *rxo,
1518 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001519{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001521 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001522
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001523 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301524 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001525 put_page(page_info->page);
1526 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527 }
1528}
1529
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment either fits entirely in the
 * skb's linear area (tiny packets) or contributes only the Ethernet
 * header there with the payload left as a page fragment; remaining
 * fragments are attached as (coalesced) page frags.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the header; keep the payload as frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* ownership of the page (or its ref) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* single-fragment completion: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1604
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the Rx page fragments, set checksum /
 * hash / vlan metadata and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb available: count the drop and free the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* trust the HW checksum verdict only if RXCSUM is on and it passed */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1640
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the Rx page fragments directly to a napi frag-skb (no header
 * copy), set metadata, and feed it to the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no skb available: free the frags of this completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when HW already verified the checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1698
/* Decode a v1-layout Rx completion entry (BE3 native mode) into the
 * driver's chip-independent be_rx_compl_info. vlan fields are extracted
 * only when the vtp (vlan present) bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* tunneled bit exists only in the v1 layout */
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001730
/* Decode a v0-layout Rx completion entry (non-BE3-native mode) into the
 * driver's chip-independent be_rx_compl_info. Unlike v1, this layout
 * carries an ip_frag bit but no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1762
/* Fetch the next Rx completion from the CQ tail, if one is ready.
 * Returns NULL when the entry's valid bit is not yet set; otherwise
 * parses it (v0 or v1 layout) into rxo->rxcp, applies vlan fixups,
 * invalidates the entry and advances the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is meaningless for an IP fragment */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* hide the pvid tag from the stack unless the VID was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1807
Eric Dumazet1829b082011-03-01 05:48:12 +00001808static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001809{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001810 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001811
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001813 gfp |= __GFP_COMP;
1814 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001815}
1816
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post fragments until MAX_RX_POST is reached or we hit a slot
	 * whose page is still in use (i.e. the RXQ is full).
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big page" and DMA-map it once;
			 * subsequent frags reuse the same mapping.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current page: take an extra
			 * page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* No: this frag is the page's last one; store the
			 * page-level DMA address so unmap can use it.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1895
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * pending. A returned entry is byte-swapped to CPU order, its valid bit
 * is cleared for reuse, and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't read the rest of the (DMA'ed) entry until
	 * the valid bit has been seen set.
	 */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit; this slot will be reused by HW */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1911
/* Unmap and free one completed TX skb, walking the WRBs from the current
 * TXQ tail up to @last_index (taken from the completion entry).
 * Returns the number of WRBs consumed (including the header WRB) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb header only once (on the first frag WRB,
		 * and only if the skb has a linear part).
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* Safe from any context (incl. hard irq) */
	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1943
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume entries until the first not-yet-written one */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier before clearing the (DMA'ed) entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1963
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001964/* Leaves the EQ is disarmed state */
1965static void be_eq_clean(struct be_eq_obj *eqo)
1966{
1967 int num = events_get(eqo);
1968
1969 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1970}
1971
/* Drain the RX CQ and free all posted-but-unused RX buffers.
 * Called during queue teardown; leaves the CQ unarmed and the RXQ empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2020
/* Reap all outstanding TX completions on every TXQ, then free any posted
 * skbs whose completions will never arrive. Used during interface down.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW made progress; restart the silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the last WRB index of this skb so
			 * be_tx_compl_process() can walk the whole range.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2080
/* Tear down all event queues: drain and destroy each created EQ, detach
 * its NAPI context, and free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2096
/* Create the event queues (one per interrupt vector, capped by the
 * configured queue count) and register a NAPI context for each.
 * Returns 0 on success or a negative/command error code.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2130
Sathya Perla5fb379e2009-06-18 00:02:59 +00002131static void be_mcc_queues_destroy(struct be_adapter *adapter)
2132{
2133 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002134
Sathya Perla8788fdc2009-07-27 22:52:03 +00002135 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002136 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002137 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002138 be_queue_free(adapter, q);
2139
Sathya Perla8788fdc2009-07-27 22:52:03 +00002140 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002141 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002142 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002143 be_queue_free(adapter, q);
2144}
2145
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate and create the MCC CQ first, then the MCC queue on top
	 * of it; unwind in reverse order via the goto chain on failure.
	 */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2178
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179static void be_tx_queues_destroy(struct be_adapter *adapter)
2180{
2181 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002182 struct be_tx_obj *txo;
2183 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002184
Sathya Perla3c8def92011-06-12 20:01:58 +00002185 for_all_tx_queues(adapter, txo, i) {
2186 q = &txo->q;
2187 if (q->created)
2188 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2189 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002190
Sathya Perla3c8def92011-06-12 20:01:58 +00002191 q = &txo->cq;
2192 if (q->created)
2193 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2194 be_queue_free(adapter, q);
2195 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002196}
2197
/* Create the TX queues and their completion queues. The number of TXQs is
 * bounded by both the number of EQs and the HW maximum.
 * Returns 0 on success or an error status from queue alloc/create.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2238
2239static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240{
2241 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002242 struct be_rx_obj *rxo;
2243 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244
Sathya Perla3abcded2010-10-03 22:12:27 -07002245 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002246 q = &rxo->cq;
2247 if (q->created)
2248 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2249 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002251}
2252
/* Create the RX completion queues (one per RX ring). The RX rings
 * themselves are created later; here only the CQs are allocated and
 * bound to their EQs. Returns 0 on success or an error status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs are distributed round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2289
/* Legacy INTx interrupt handler: count pending events, schedule NAPI,
 * and suppress spurious-interrupt reports from the kernel.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2321
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002322static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002323{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002324 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002325
Sathya Perla0b545a62012-11-23 00:27:18 +00002326 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2327 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002328 return IRQ_HANDLED;
2329}
2330
Sathya Perla2e588f82011-03-11 02:49:26 +00002331static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002332{
Somnath Koture38b1702013-05-29 22:55:56 +00002333 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002334}
2335
/* Poll up to @budget RX completions on @rxo, dispatching each frame via
 * GRO or the regular RX path. @polling distinguishes NAPI from busy-poll
 * callers. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2391
/* Reap up to @budget TX completions from @txo's CQ, freeing the finished
 * skbs and waking the netdev subqueue @idx if it had been stopped.
 * Returns true if the CQ was drained within the budget ("done").
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002425
/* NAPI poll handler: services the TXQs and RXQs attached to this EQ and,
 * on the MCC EQ, the management completions as well. Rearms the EQ only
 * when all work fit within @budget. Returns the amount of work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* The napi/busy-poll lock arbitrates with be_busy_poll() */
	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RXQs; pretend budget was consumed so
		 * we get polled again.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2470
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll handler: processes a small batch (4) of RX
 * completions on this EQ's rings. Returns LL_FLUSH_BUSY when NAPI holds
 * the lock, otherwise the number of completions processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2492
/* Poll the adapter for a fatal HW/FW error condition.
 * Lancer chips report errors through the SLIPORT registers in BAR space;
 * other chips expose Unrecoverable Error (UE) bits via PCI config space.
 * When a real error is found the netdev carrier is turned off;
 * adapter->hw_error is latched only where the chip family warrants it.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched on a previous pass; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Ignore UE bits that the mask registers tell us to ignore */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Decode and log each set UE bit by its name */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2568
Sathya Perla8d56ff12009-11-22 22:02:26 +00002569static void be_msix_disable(struct be_adapter *adapter)
2570{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002571 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002572 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002573 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302574 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002575 }
2576}
2577
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002578static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002579{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002580 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002581 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002582
Sathya Perla92bf14a2013-08-27 16:57:32 +05302583 /* If RoCE is supported, program the max number of NIC vectors that
2584 * may be configured via set-channels, along with vectors needed for
2585 * RoCe. Else, just program the number we'll use initially.
2586 */
2587 if (be_roce_supported(adapter))
2588 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2589 2 * num_online_cpus());
2590 else
2591 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002592
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002593 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002594 adapter->msix_entries[i].entry = i;
2595
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002596 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2597 MIN_MSIX_VECTORS, num_vec);
2598 if (num_vec < 0)
2599 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00002600
Sathya Perla92bf14a2013-08-27 16:57:32 +05302601 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2602 adapter->num_msix_roce_vec = num_vec / 2;
2603 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2604 adapter->num_msix_roce_vec);
2605 }
2606
2607 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2608
2609 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2610 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002611 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002612
2613fail:
2614 dev_warn(dev, "MSIx enable failed\n");
2615
2616 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2617 if (!be_physfn(adapter))
2618 return num_vec;
2619 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002620}
2621
/* Return the Linux IRQ number backing the given EQ's MSI-X table entry. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2627
2628static int be_msix_register(struct be_adapter *adapter)
2629{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002630 struct net_device *netdev = adapter->netdev;
2631 struct be_eq_obj *eqo;
2632 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002633
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002634 for_all_evt_queues(adapter, eqo, i) {
2635 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2636 vec = be_msix_vec_get(adapter, eqo);
2637 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002638 if (status)
2639 goto err_msix;
2640 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002641
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002642 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002643err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002644 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2645 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2646 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05302647 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002648 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002649 return status;
2650}
2651
2652static int be_irq_register(struct be_adapter *adapter)
2653{
2654 struct net_device *netdev = adapter->netdev;
2655 int status;
2656
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002657 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002658 status = be_msix_register(adapter);
2659 if (status == 0)
2660 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002661 /* INTx is not supported for VF */
2662 if (!be_physfn(adapter))
2663 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002664 }
2665
Sathya Perlae49cc342012-11-27 19:50:02 +00002666 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002667 netdev->irq = adapter->pdev->irq;
2668 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002669 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002670 if (status) {
2671 dev_err(&adapter->pdev->dev,
2672 "INTx request IRQ failed - err %d\n", status);
2673 return status;
2674 }
2675done:
2676 adapter->isr_registered = true;
2677 return 0;
2678}
2679
2680static void be_irq_unregister(struct be_adapter *adapter)
2681{
2682 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002683 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002684 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002685
2686 if (!adapter->isr_registered)
2687 return;
2688
2689 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002690 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002691 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002692 goto done;
2693 }
2694
2695 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002696 for_all_evt_queues(adapter, eqo, i)
2697 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002698
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002699done:
2700 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002701}
2702
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002703static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002704{
2705 struct be_queue_info *q;
2706 struct be_rx_obj *rxo;
2707 int i;
2708
2709 for_all_rx_queues(adapter, rxo, i) {
2710 q = &rxo->q;
2711 if (q->created) {
2712 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002713 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002714 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002715 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002716 }
2717}
2718
/* ndo_stop handler: quiesce and tear down the data path.
 * The sequence is strictly ordered: stop RoCE and NAPI first, then async
 * MCC events, drain TX, destroy RX rings, remove extra unicast MACs, clean
 * the EQs and finally unregister the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Remove the unicast MAC filters beyond the primary one (index 0) */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Let in-flight interrupt handlers finish before cleaning each EQ */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2768
/* Allocate and create all RX rings, program the RSS indirection table and
 * a random hash key when multiple RX queues exist, and post the initial
 * receive buffers. Returns 0 or the status of the failing step.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table round-robin across the RSS
		 * queues; the inner loop re-walks the queues for each stride.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
			j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Cache the key only after the FW accepted it */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2834
/* ndo_open handler: bring the interface up.
 * Creates the RX rings, registers IRQs, arms all RX/TX CQs and the EQs,
 * enables NAPI/busy-poll and async MCC processing, then starts the TX
 * queues. Any failure tears down partial state via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm completion queues so they start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VxLAN ports (skyhawk only) */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2884
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002885static int be_setup_wol(struct be_adapter *adapter, bool enable)
2886{
2887 struct be_dma_mem cmd;
2888 int status = 0;
2889 u8 mac[ETH_ALEN];
2890
2891 memset(mac, 0, ETH_ALEN);
2892
2893 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002894 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2895 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002896 if (cmd.va == NULL)
2897 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002898
2899 if (enable) {
2900 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302901 PCICFG_PM_CONTROL_OFFSET,
2902 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002903 if (status) {
2904 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002905 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002906 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2907 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002908 return status;
2909 }
2910 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302911 adapter->netdev->dev_addr,
2912 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002913 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2914 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2915 } else {
2916 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2917 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2918 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2919 }
2920
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002921 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002922 return status;
2923}
2924
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002925/*
2926 * Generate a seed MAC address from the PF MAC Address using jhash.
2927 * MAC Address for VFs are assigned incrementally starting from the seed.
2928 * These addresses are programmed in the ASIC by the PF and the VF driver
2929 * queries for the MAC address during its probe.
2930 */
Sathya Perla4c876612013-02-03 20:30:11 +00002931static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002932{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002933 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002934 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002935 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002936 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002937
2938 be_vf_eth_addr_generate(adapter, mac);
2939
Sathya Perla11ac75e2011-12-13 00:58:50 +00002940 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302941 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002942 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002943 vf_cfg->if_handle,
2944 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302945 else
2946 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2947 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002948
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002949 if (status)
2950 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05302951 "Mac address assignment failed for VF %d\n",
2952 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002953 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002954 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002955
2956 mac[5] += 1;
2957 }
2958 return status;
2959}
2960
Sathya Perla4c876612013-02-03 20:30:11 +00002961static int be_vfs_mac_query(struct be_adapter *adapter)
2962{
2963 int status, vf;
2964 u8 mac[ETH_ALEN];
2965 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002966
2967 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302968 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2969 mac, vf_cfg->if_handle,
2970 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002971 if (status)
2972 return status;
2973 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2974 }
2975 return 0;
2976}
2977
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002978static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002979{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002980 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002981 u32 vf;
2982
Sathya Perla257a3fe2013-06-14 15:54:51 +05302983 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002984 dev_warn(&adapter->pdev->dev,
2985 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002986 goto done;
2987 }
2988
Sathya Perlab4c1df92013-05-08 02:05:47 +00002989 pci_disable_sriov(adapter->pdev);
2990
Sathya Perla11ac75e2011-12-13 00:58:50 +00002991 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302992 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002993 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2994 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302995 else
2996 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2997 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002998
Sathya Perla11ac75e2011-12-13 00:58:50 +00002999 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3000 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003001done:
3002 kfree(adapter->vf_cfg);
3003 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003004}
3005
/* Destroy all queue sets: MCC queues, RX completion queues, TX queues,
 * and finally the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3013
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303014static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003015{
Sathya Perla191eb752012-02-23 18:50:13 +00003016 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3017 cancel_delayed_work_sync(&adapter->work);
3018 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3019 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303020}
3021
Somnath Koturb05004a2013-12-05 12:08:16 +05303022static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303023{
3024 int i;
3025
Somnath Koturb05004a2013-12-05 12:08:16 +05303026 if (adapter->pmac_id) {
3027 for (i = 0; i < (adapter->uc_macs + 1); i++)
3028 be_cmd_pmac_del(adapter, adapter->if_handle,
3029 adapter->pmac_id[i], 0);
3030 adapter->uc_macs = 0;
3031
3032 kfree(adapter->pmac_id);
3033 adapter->pmac_id = NULL;
3034 }
3035}
3036
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303037#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN offload state: convert the interface back from tunnel mode
 * if offloads were enabled, clear the VxLAN UDP port programmed in the FW,
 * and reset the driver's VxLAN bookkeeping unconditionally.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303050#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303051
/* Undo adapter setup: stop the worker, clear VFs, drop VxLAN offloads,
 * remove MAC filters, destroy the interface and all queues, and release
 * MSI-X. Clears BE_FLAGS_SETUP_DONE so be_close() becomes a no-op.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3073
Sathya Perla4c876612013-02-03 20:30:11 +00003074static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003075{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303076 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003077 struct be_vf_cfg *vf_cfg;
3078 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003079 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003080
Sathya Perla4c876612013-02-03 20:30:11 +00003081 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3082 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003083
Sathya Perla4c876612013-02-03 20:30:11 +00003084 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303085 if (!BE3_chip(adapter)) {
3086 status = be_cmd_get_profile_config(adapter, &res,
3087 vf + 1);
3088 if (!status)
3089 cap_flags = res.if_cap_flags;
3090 }
Sathya Perla4c876612013-02-03 20:30:11 +00003091
3092 /* If a FW profile exists, then cap_flags are updated */
3093 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
Sathya Perla748b5392014-05-09 13:29:13 +05303094 BE_IF_FLAGS_BROADCAST |
3095 BE_IF_FLAGS_MULTICAST);
3096 status =
3097 be_cmd_if_create(adapter, cap_flags, en_flags,
3098 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003099 if (status)
3100 goto err;
3101 }
3102err:
3103 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003104}
3105
Sathya Perla39f1d942012-05-08 19:41:24 +00003106static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003107{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003108 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003109 int vf;
3110
Sathya Perla39f1d942012-05-08 19:41:24 +00003111 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3112 GFP_KERNEL);
3113 if (!adapter->vf_cfg)
3114 return -ENOMEM;
3115
Sathya Perla11ac75e2011-12-13 00:58:50 +00003116 for_all_vfs(adapter, vf_cfg, vf) {
3117 vf_cfg->if_handle = -1;
3118 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003119 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003120 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003121}
3122
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003123static int be_vf_setup(struct be_adapter *adapter)
3124{
Sathya Perla4c876612013-02-03 20:30:11 +00003125 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303126 struct be_vf_cfg *vf_cfg;
3127 int status, old_vfs, vf;
Sathya Perla04a06022013-07-23 15:25:00 +05303128 u32 privileges;
Somnath Koturc5022242014-03-03 14:24:20 +05303129 u16 lnk_speed;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003130
Sathya Perla257a3fe2013-06-14 15:54:51 +05303131 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00003132 if (old_vfs) {
3133 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3134 if (old_vfs != num_vfs)
3135 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3136 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00003137 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303138 if (num_vfs > be_max_vfs(adapter))
Sathya Perla4c876612013-02-03 20:30:11 +00003139 dev_info(dev, "Device supports %d VFs and not %d\n",
Sathya Perla92bf14a2013-08-27 16:57:32 +05303140 be_max_vfs(adapter), num_vfs);
3141 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
Sathya Perlab4c1df92013-05-08 02:05:47 +00003142 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00003143 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003144 }
3145
3146 status = be_vf_setup_init(adapter);
3147 if (status)
3148 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003149
Sathya Perla4c876612013-02-03 20:30:11 +00003150 if (old_vfs) {
3151 for_all_vfs(adapter, vf_cfg, vf) {
3152 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3153 if (status)
3154 goto err;
3155 }
3156 } else {
3157 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003158 if (status)
3159 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003160 }
3161
Sathya Perla4c876612013-02-03 20:30:11 +00003162 if (old_vfs) {
3163 status = be_vfs_mac_query(adapter);
3164 if (status)
3165 goto err;
3166 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00003167 status = be_vf_eth_addr_config(adapter);
3168 if (status)
3169 goto err;
3170 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003171
Sathya Perla11ac75e2011-12-13 00:58:50 +00003172 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303173 /* Allow VFs to programs MAC/VLAN filters */
3174 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3175 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3176 status = be_cmd_set_fn_privileges(adapter,
3177 privileges |
3178 BE_PRIV_FILTMGMT,
3179 vf + 1);
3180 if (!status)
3181 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3182 vf);
3183 }
3184
Sathya Perla4c876612013-02-03 20:30:11 +00003185 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3186 * Allow full available bandwidth
3187 */
3188 if (BE3_chip(adapter) && !old_vfs)
Sathya Perlaa4018012014-03-27 10:46:18 +05303189 be_cmd_config_qos(adapter, 1000, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003190
3191 status = be_cmd_link_status_query(adapter, &lnk_speed,
3192 NULL, vf + 1);
3193 if (!status)
3194 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003195
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303196 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303197 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303198 be_cmd_set_logical_link_config(adapter,
3199 IFLA_VF_LINK_STATE_AUTO,
3200 vf+1);
3201 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003202 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003203
3204 if (!old_vfs) {
3205 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3206 if (status) {
3207 dev_err(dev, "SRIOV enable failed\n");
3208 adapter->num_vfs = 0;
3209 goto err;
3210 }
3211 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003212 return 0;
3213err:
Sathya Perla4c876612013-02-03 20:30:11 +00003214 dev_err(dev, "VF setup failed\n");
3215 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003216 return status;
3217}
3218
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303219/* Converting function_mode bits on BE3 to SH mc_type enums */
3220
3221static u8 be_convert_mc_type(u32 function_mode)
3222{
3223 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3224 return vNIC1;
3225 else if (function_mode & FLEX10_MODE)
3226 return FLEX10;
3227 else if (function_mode & VNIC_MODE)
3228 return vNIC2;
3229 else if (function_mode & UMC_ENABLED)
3230 return UMC;
3231 else
3232 return MC_NONE;
3233}
3234
/* On BE2/BE3 FW does not suggest the supported limits */
/* Derive the per-function resource limits (queues, MACs, vlans) for
 * BE2/BE3 chips from chip family, PF/VF role, SR-IOV and multi-channel
 * state, and fill them into @res. No return value; @res is fully
 * populated on exit.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs = 0;

	if (be_physfn(adapter) && BE3_chip(adapter)) {
		be_cmd_get_profile_config(adapter, res, 0);
		/* Some old versions of BE3 FW don't report max_vfs value */
		if (res->max_vfs == 0) {
			/* Fall back to the PCI SR-IOV capability, capped at
			 * the driver's MAX_VFS
			 */
			max_vfs = pci_sriov_get_totalvfs(pdev);
			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		}
		use_sriov = res->max_vfs && sriov_want(adapter);
	}

	/* VFs get a smaller unicast-MAC quota than the PF */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for an RSS-capable, non-SR-IOV PF; otherwise
	 * max_rss_qs keeps whatever value @res arrived with (callers pass
	 * a zeroed struct)
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	/* With SR-IOV possible, fewer event queues are left for the PF */
	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3304
Sathya Perla30128032011-11-10 19:17:57 +00003305static void be_setup_init(struct be_adapter *adapter)
3306{
3307 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003308 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003309 adapter->if_handle = -1;
3310 adapter->be3_native = false;
3311 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003312 if (be_physfn(adapter))
3313 adapter->cmd_privileges = MAX_PRIVILEGES;
3314 else
3315 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003316}
3317
Sathya Perla92bf14a2013-08-27 16:57:32 +05303318static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003319{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303320 struct device *dev = &adapter->pdev->dev;
3321 struct be_resources res = {0};
3322 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003323
Sathya Perla92bf14a2013-08-27 16:57:32 +05303324 if (BEx_chip(adapter)) {
3325 BEx_get_resources(adapter, &res);
3326 adapter->res = res;
3327 }
3328
Sathya Perla92bf14a2013-08-27 16:57:32 +05303329 /* For Lancer, SH etc read per-function resource limits from FW.
3330 * GET_FUNC_CONFIG returns per function guaranteed limits.
3331 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3332 */
Sathya Perla4c876612013-02-03 20:30:11 +00003333 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303334 status = be_cmd_get_func_config(adapter, &res);
3335 if (status)
3336 return status;
3337
3338 /* If RoCE may be enabled stash away half the EQs for RoCE */
3339 if (be_roce_supported(adapter))
3340 res.max_evt_qs /= 2;
3341 adapter->res = res;
3342
3343 if (be_physfn(adapter)) {
3344 status = be_cmd_get_profile_config(adapter, &res, 0);
3345 if (status)
3346 return status;
3347 adapter->res.max_vfs = res.max_vfs;
3348 }
3349
3350 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3351 be_max_txqs(adapter), be_max_rxqs(adapter),
3352 be_max_rss(adapter), be_max_eqs(adapter),
3353 be_max_vfs(adapter));
3354 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3355 be_max_uc(adapter), be_max_mc(adapter),
3356 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003357 }
3358
Sathya Perla92bf14a2013-08-27 16:57:32 +05303359 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003360}
3361
/* Routine to query per function resource limits */
/* Query FW configuration (port, mode, caps), then resource limits, and
 * allocate the pmac_id table sized to the unicast-MAC limit.
 * Returns 0 on success, a FW error code, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* Report the active FW profile; failure here is informational only */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per programmable unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3396
Sathya Perla95046b92013-07-23 15:25:02 +05303397static int be_mac_setup(struct be_adapter *adapter)
3398{
3399 u8 mac[ETH_ALEN];
3400 int status;
3401
3402 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3403 status = be_cmd_get_perm_mac(adapter, mac);
3404 if (status)
3405 return status;
3406
3407 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3408 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3409 } else {
3410 /* Maybe the HW was reset; dev_addr must be re-programmed */
3411 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3412 }
3413
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003414 /* For BE3-R VFs, the PF programs the initial MAC address */
3415 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3416 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3417 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303418 return 0;
3419}
3420
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303421static void be_schedule_worker(struct be_adapter *adapter)
3422{
3423 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3424 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3425}
3426
Sathya Perla77071332013-08-27 16:57:34 +05303427static int be_setup_queues(struct be_adapter *adapter)
3428{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303429 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303430 int status;
3431
3432 status = be_evt_queues_create(adapter);
3433 if (status)
3434 goto err;
3435
3436 status = be_tx_qs_create(adapter);
3437 if (status)
3438 goto err;
3439
3440 status = be_rx_cqs_create(adapter);
3441 if (status)
3442 goto err;
3443
3444 status = be_mcc_queues_create(adapter);
3445 if (status)
3446 goto err;
3447
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303448 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3449 if (status)
3450 goto err;
3451
3452 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3453 if (status)
3454 goto err;
3455
Sathya Perla77071332013-08-27 16:57:34 +05303456 return 0;
3457err:
3458 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3459 return status;
3460}
3461
/* Tear down and re-create all queues (and, when possible, the MSI-X
 * vector set) — used when the queue configuration changes at runtime.
 * The interface is closed and re-opened around the rebuild if it was
 * running. Returns 0 on success or the first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	/* Worker must not run while queues are being swapped out */
	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-X only if it was actually disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3497
/* Main one-shot initialization of the adapter: query config, enable
 * MSI-X, create the FW interface and all queues, program the MAC,
 * apply vlan/rx-mode/flow-control settings, optionally bring up SR-IOV
 * VFs, and start the housekeeping worker.
 * Returns 0 on success; on failure everything done so far is undone via
 * be_clear() and the error status is returned.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the interface capabilities we want and the HW
	 * actually supports
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Old BE2 firmware has known interrupt problems; warn but proceed */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-apply vlans that were configured before this (re)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Sync HW flow-control state with the driver's desired settings */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* VF setup failures are not fatal to PF setup */
	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3583
Ivan Vecera66268732011-12-08 01:31:21 +00003584#ifdef CONFIG_NET_POLL_CONTROLLER
3585static void be_netpoll(struct net_device *netdev)
3586{
3587 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003588 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003589 int i;
3590
Sathya Perlae49cc342012-11-27 19:50:02 +00003591 for_all_evt_queues(adapter, eqo, i) {
3592 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3593 napi_schedule(&eqo->napi);
3594 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003595
3596 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003597}
3598#endif
3599
/* Vendor signature expected at the start of a UFI firmware image */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Flash section-directory cookie, split into the two 16-byte words that
 * prefix a flash_section_info header; matched with memcmp() while
 * scanning a UFI image (see get_fsec_info()). Note the second half
 * intentionally fills all 16 bytes, so there is no NUL terminator.
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003602
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003603static bool be_flash_redboot(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303604 const u8 *p, u32 img_start, int image_size,
3605 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003606{
3607 u32 crc_offset;
3608 u8 flashed_crc[4];
3609 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003610
3611 crc_offset = hdr_size + img_start + image_size - 4;
3612
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003613 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003614
Sathya Perla748b5392014-05-09 13:29:13 +05303615 status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003616 if (status) {
3617 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303618 "could not get crc from flash, not flashing redboot\n");
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003619 return false;
3620 }
3621
3622 /*update redboot only if crc does not match*/
3623 if (!memcmp(flashed_crc, p, 4))
3624 return false;
3625 else
3626 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003627}
3628
Sathya Perla306f1342011-08-02 19:57:45 +00003629static bool phy_flashing_required(struct be_adapter *adapter)
3630{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003631 return (adapter->phy.phy_type == TN_8022 &&
3632 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003633}
3634
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003635static bool is_comp_in_ufi(struct be_adapter *adapter,
3636 struct flash_section_info *fsec, int type)
3637{
3638 int i = 0, img_type = 0;
3639 struct flash_section_info_g2 *fsec_g2 = NULL;
3640
Sathya Perlaca34fe32012-11-06 17:48:56 +00003641 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003642 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3643
3644 for (i = 0; i < MAX_FLASH_COMP; i++) {
3645 if (fsec_g2)
3646 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3647 else
3648 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3649
3650 if (img_type == type)
3651 return true;
3652 }
3653 return false;
3654
3655}
3656
Jingoo Han4188e7d2013-08-05 18:02:02 +09003657static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303658 int header_size,
3659 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003660{
3661 struct flash_section_info *fsec = NULL;
3662 const u8 *p = fw->data;
3663
3664 p += header_size;
3665 while (p < (fw->data + fw->size)) {
3666 fsec = (struct flash_section_info *)p;
3667 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3668 return fsec;
3669 p += 32;
3670 }
3671 return NULL;
3672}
3673
/* Write @img (@img_size bytes) to the flash region selected by @optype,
 * in 32KB chunks copied through the pre-allocated DMA buffer @flash_cmd.
 * Intermediate chunks use the SAVE opcode; the final chunk uses the
 * FLASH opcode, which commits the write. Returns 0 on success or the
 * FW command status. A PHY-FW write rejected with ILLEGAL_IOCTL_REQ is
 * treated as non-fatal (the loop just stops).
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* At most 32KB per FW command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			/* FW may legitimately refuse PHY-FW flashing */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3714
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003715/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003716static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303717 const struct firmware *fw,
3718 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003719{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003720 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003721 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003722 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003723 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003724 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003725 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003726
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003727 struct flash_comp gen3_flash_types[] = {
3728 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3729 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3730 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3731 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3732 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3733 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3734 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3735 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3736 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3737 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3738 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3739 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3740 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3741 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3742 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3743 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3744 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3745 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3746 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3747 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003748 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003749
3750 struct flash_comp gen2_flash_types[] = {
3751 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3752 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3753 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3754 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3755 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3756 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3757 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3758 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3759 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3760 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3761 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3762 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3763 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3764 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3765 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3766 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003767 };
3768
Sathya Perlaca34fe32012-11-06 17:48:56 +00003769 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003770 pflashcomp = gen3_flash_types;
3771 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003772 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003773 } else {
3774 pflashcomp = gen2_flash_types;
3775 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003776 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003777 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003778
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003779 /* Get flash section info*/
3780 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3781 if (!fsec) {
3782 dev_err(&adapter->pdev->dev,
3783 "Invalid Cookie. UFI corrupted ?\n");
3784 return -1;
3785 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003786 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003787 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003788 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003789
3790 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3791 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3792 continue;
3793
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003794 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3795 !phy_flashing_required(adapter))
3796 continue;
3797
3798 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3799 redboot = be_flash_redboot(adapter, fw->data,
Sathya Perla748b5392014-05-09 13:29:13 +05303800 pflashcomp[i].offset,
3801 pflashcomp[i].size,
3802 filehdr_size +
3803 img_hdrs_size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003804 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003805 continue;
3806 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003807
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003808 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003809 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003810 if (p + pflashcomp[i].size > fw->data + fw->size)
3811 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003812
3813 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303814 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003815 if (status) {
3816 dev_err(&adapter->pdev->dev,
3817 "Flashing section type %d failed.\n",
3818 pflashcomp[i].img_type);
3819 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003820 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003821 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003822 return 0;
3823}
3824
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003825static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303826 const struct firmware *fw,
3827 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003828{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003829 int status = 0, i, filehdr_size = 0;
3830 int img_offset, img_size, img_optype, redboot;
3831 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3832 const u8 *p = fw->data;
3833 struct flash_section_info *fsec = NULL;
3834
3835 filehdr_size = sizeof(struct flash_file_hdr_g3);
3836 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3837 if (!fsec) {
3838 dev_err(&adapter->pdev->dev,
3839 "Invalid Cookie. UFI corrupted ?\n");
3840 return -1;
3841 }
3842
3843 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3844 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3845 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3846
3847 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3848 case IMAGE_FIRMWARE_iSCSI:
3849 img_optype = OPTYPE_ISCSI_ACTIVE;
3850 break;
3851 case IMAGE_BOOT_CODE:
3852 img_optype = OPTYPE_REDBOOT;
3853 break;
3854 case IMAGE_OPTION_ROM_ISCSI:
3855 img_optype = OPTYPE_BIOS;
3856 break;
3857 case IMAGE_OPTION_ROM_PXE:
3858 img_optype = OPTYPE_PXE_BIOS;
3859 break;
3860 case IMAGE_OPTION_ROM_FCoE:
3861 img_optype = OPTYPE_FCOE_BIOS;
3862 break;
3863 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3864 img_optype = OPTYPE_ISCSI_BACKUP;
3865 break;
3866 case IMAGE_NCSI:
3867 img_optype = OPTYPE_NCSI_FW;
3868 break;
3869 default:
3870 continue;
3871 }
3872
3873 if (img_optype == OPTYPE_REDBOOT) {
3874 redboot = be_flash_redboot(adapter, fw->data,
Sathya Perla748b5392014-05-09 13:29:13 +05303875 img_offset, img_size,
3876 filehdr_size +
3877 img_hdrs_size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003878 if (!redboot)
3879 continue;
3880 }
3881
3882 p = fw->data;
3883 p += filehdr_size + img_offset + img_hdrs_size;
3884 if (p + img_size > fw->data + fw->size)
3885 return -1;
3886
3887 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3888 if (status) {
3889 dev_err(&adapter->pdev->dev,
3890 "Flashing section type %d failed.\n",
3891 fsec->fsec_entry[i].type);
3892 return status;
3893 }
3894 }
3895 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003896}
3897
/* Download a firmware image to a Lancer-family adapter.
 *
 * The image is streamed to flash in 32KB chunks via
 * lancer_cmd_write_object(), then committed with a zero-length write at
 * the final offset. Depending on the change_status the FW returns, the
 * adapter may be reset to activate the new image, or a reboot may be
 * required.
 *
 * @adapter: adapter being flashed
 * @fw:      firmware image obtained via request_firmware()
 *
 * Returns 0 on success, or a negative errno / FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose length is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the write_object request plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may accept less than chunk_size; advance by what was
		 * actually written
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3994
Sathya Perlaca34fe32012-11-06 17:48:56 +00003995#define UFI_TYPE2 2
3996#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003997#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003998#define UFI_TYPE4 4
3999static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004000 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004001{
4002 if (fhdr == NULL)
4003 goto be_get_ufi_exit;
4004
Sathya Perlaca34fe32012-11-06 17:48:56 +00004005 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4006 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004007 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4008 if (fhdr->asic_type_rev == 0x10)
4009 return UFI_TYPE3R;
4010 else
4011 return UFI_TYPE3;
4012 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004013 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004014
4015be_get_ufi_exit:
4016 dev_err(&adapter->pdev->dev,
4017 "UFI and Interface are not compatible for flashing\n");
4018 return -1;
4019}
4020
/* Flash a UFI firmware image to a BE2/BE3/Skyhawk adapter.
 *
 * Derives the UFI type from the flash_file_hdr_g3 header, then walks the
 * per-image headers; imageid == 1 selects the flashing routine for the
 * chip family. A BE3 (non-R) UFI is rejected on BE3-R hardware. BE2 UFIs
 * are flashed with num_imgs == 0.
 *
 * Returns 0 on success, negative on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every write_flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* BE2 UFIs have no flashable per-image entry in the loop above */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4089
4090int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4091{
4092 const struct firmware *fw;
4093 int status;
4094
4095 if (!netif_running(adapter->netdev)) {
4096 dev_err(&adapter->pdev->dev,
4097 "Firmware load not allowed (interface is down)\n");
4098 return -1;
4099 }
4100
4101 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4102 if (status)
4103 goto fw_exit;
4104
4105 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4106
4107 if (lancer_chip(adapter))
4108 status = lancer_fw_download(adapter, fw);
4109 else
4110 status = be_fw_download(adapter, fw);
4111
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004112 if (!status)
4113 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4114 adapter->fw_on_flash);
4115
Ajit Khaparde84517482009-09-04 03:12:16 +00004116fw_exit:
4117 release_firmware(fw);
4118 return status;
4119}
4120
Sathya Perla748b5392014-05-09 13:29:13 +05304121static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004122{
4123 struct be_adapter *adapter = netdev_priv(dev);
4124 struct nlattr *attr, *br_spec;
4125 int rem;
4126 int status = 0;
4127 u16 mode = 0;
4128
4129 if (!sriov_enabled(adapter))
4130 return -EOPNOTSUPP;
4131
4132 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4133
4134 nla_for_each_nested(attr, br_spec, rem) {
4135 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4136 continue;
4137
4138 mode = nla_get_u16(attr);
4139 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4140 return -EINVAL;
4141
4142 status = be_cmd_set_hsw_config(adapter, 0, 0,
4143 adapter->if_handle,
4144 mode == BRIDGE_MODE_VEPA ?
4145 PORT_FWD_TYPE_VEPA :
4146 PORT_FWD_TYPE_VEB);
4147 if (status)
4148 goto err;
4149
4150 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4151 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4152
4153 return status;
4154 }
4155err:
4156 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4157 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4158
4159 return status;
4160}
4161
4162static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304163 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004164{
4165 struct be_adapter *adapter = netdev_priv(dev);
4166 int status = 0;
4167 u8 hsw_mode;
4168
4169 if (!sriov_enabled(adapter))
4170 return 0;
4171
4172 /* BE and Lancer chips support VEB mode only */
4173 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4174 hsw_mode = PORT_FWD_TYPE_VEB;
4175 } else {
4176 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4177 adapter->if_handle, &hsw_mode);
4178 if (status)
4179 return 0;
4180 }
4181
4182 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4183 hsw_mode == PORT_FWD_TYPE_VEPA ?
4184 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4185}
4186
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304187#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304188static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4189 __be16 port)
4190{
4191 struct be_adapter *adapter = netdev_priv(netdev);
4192 struct device *dev = &adapter->pdev->dev;
4193 int status;
4194
4195 if (lancer_chip(adapter) || BEx_chip(adapter))
4196 return;
4197
4198 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4199 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4200 be16_to_cpu(port));
4201 dev_info(dev,
4202 "Only one UDP port supported for VxLAN offloads\n");
4203 return;
4204 }
4205
4206 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4207 OP_CONVERT_NORMAL_TO_TUNNEL);
4208 if (status) {
4209 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4210 goto err;
4211 }
4212
4213 status = be_cmd_set_vxlan_port(adapter, port);
4214 if (status) {
4215 dev_warn(dev, "Failed to add VxLAN port\n");
4216 goto err;
4217 }
4218 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4219 adapter->vxlan_port = port;
4220
4221 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4222 be16_to_cpu(port));
4223 return;
4224err:
4225 be_disable_vxlan_offloads(adapter);
4226 return;
4227}
4228
4229static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4230 __be16 port)
4231{
4232 struct be_adapter *adapter = netdev_priv(netdev);
4233
4234 if (lancer_chip(adapter) || BEx_chip(adapter))
4235 return;
4236
4237 if (adapter->vxlan_port != port)
4238 return;
4239
4240 be_disable_vxlan_offloads(adapter);
4241
4242 dev_info(&adapter->pdev->dev,
4243 "Disabled VxLAN offloads for UDP port %d\n",
4244 be16_to_cpu(port));
4245}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304246#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304247
/* Netdev callbacks for be2net interfaces. The SR-IOV (ndo_set_vf_*) and
 * bridge (ndo_bridge_*) hooks are registered unconditionally; the
 * handlers themselves return early when SR-IOV is not enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF management */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4277
/* Initialize netdev feature flags and ops prior to register_netdev().
 * Skyhawk chips additionally advertise VxLAN (UDP tunnel) segmentation
 * offloads through hw_enc_features.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* User-toggleable features start out enabled; VLAN RX/filter are
	 * always on
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4310
4311static void be_unmap_pci_bars(struct be_adapter *adapter)
4312{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004313 if (adapter->csr)
4314 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004315 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004316 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004317}
4318
/* PCI BAR number holding the doorbell registers: BAR 0 on Lancer chips
 * and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4326
4327static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004328{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004329 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004330 adapter->roce_db.size = 4096;
4331 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4332 db_bar(adapter));
4333 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4334 db_bar(adapter));
4335 }
Parav Pandit045508a2012-03-26 14:27:13 +00004336 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004337}
4338
4339static int be_map_pci_bars(struct be_adapter *adapter)
4340{
4341 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004342
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004343 if (BEx_chip(adapter) && be_physfn(adapter)) {
4344 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4345 if (adapter->csr == NULL)
4346 return -ENOMEM;
4347 }
4348
Sathya Perlace66f782012-11-06 17:48:58 +00004349 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004350 if (addr == NULL)
4351 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004352 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004353
4354 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004355 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004356
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004357pci_map_err:
4358 be_unmap_pci_bars(adapter);
4359 return -ENOMEM;
4360}
4361
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004362static void be_ctrl_cleanup(struct be_adapter *adapter)
4363{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004364 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004365
4366 be_unmap_pci_bars(adapter);
4367
4368 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004369 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4370 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004371
Sathya Perla5b8821b2011-08-02 19:57:44 +00004372 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004373 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004374 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4375 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004376}
4377
/* One-time control-path setup: read the SLI interface register, map the
 * PCI BARs, and allocate the DMA memory for the mailbox and the
 * rx-filter command. Each failure path unwinds everything acquired so
 * far via the goto ladder. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Derive SLI family and PF/VF identity from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox proper can be aligned
	 * to a 16-byte boundary below
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Zeroed DMA buffer reused for all rx-filter commands */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4436
4437static void be_stats_cleanup(struct be_adapter *adapter)
4438{
Sathya Perla3abcded2010-10-03 22:12:27 -07004439 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004440
4441 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004442 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4443 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004444}
4445
4446static int be_stats_init(struct be_adapter *adapter)
4447{
Sathya Perla3abcded2010-10-03 22:12:27 -07004448 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004449
Sathya Perlaca34fe32012-11-06 17:48:56 +00004450 if (lancer_chip(adapter))
4451 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4452 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004453 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004454 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004455 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004456 else
4457 /* ALL non-BE ASICs */
4458 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004459
Joe Perchesede23fa82013-08-26 22:45:23 -07004460 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4461 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004462 if (cmd->va == NULL)
4463 return -1;
4464 return 0;
4465}
4466
/* PCI remove callback: tear down the adapter in roughly the reverse
 * order of probe. RoCE and interrupts go first, and the error-recovery
 * worker is cancelled before the netdev is unregistered so it cannot
 * race with the teardown below.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4497
Sathya Perla39f1d942012-05-08 19:41:24 +00004498static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004499{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304500 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004501
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004502 status = be_cmd_get_cntl_attributes(adapter);
4503 if (status)
4504 return status;
4505
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004506 /* Must be a power of 2 or else MODULO will BUG_ON */
4507 adapter->be_get_temp_freq = 64;
4508
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304509 if (BEx_chip(adapter)) {
4510 level = be_cmd_get_fw_log_level(adapter);
4511 adapter->msg_enable =
4512 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4513 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004514
Sathya Perla92bf14a2013-08-27 16:57:32 +05304515 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004516 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004517}
4518
/* Attempt to recover a Lancer adapter after a firmware error: wait for
 * the chip to report ready, tear down and rebuild all adapter state,
 * and re-open the interface if it was running.
 *
 * Returns 0 on success; -EAGAIN indicates resources are still being
 * provisioned and the caller should retry later.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): logged at err level even though this is the
	 * success path — looks like it should be dev_info; confirm
	 */
	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4555
/* Delayed work that polls for adapter errors and, on Lancer chips,
 * drives the recovery sequence. Reschedules itself every second unless
 * recovery failed with a terminal (non -EAGAIN) error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach the netdev under rtnl so the stack stops using
		 * it while recovery runs
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4582
/* Periodic (1s) housekeeping work: reaps MCC completions while the
 * interface is down, kicks off stats collection, reads the die
 * temperature on PFs, replenishes starved RX queues and updates the
 * adaptive EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only issue a new stats request once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4625
Sathya Perla257a3fe2013-06-14 15:54:51 +05304626/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004627static bool be_reset_required(struct be_adapter *adapter)
4628{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304629 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004630}
4631
Sathya Perlad3791422012-09-28 04:39:44 +00004632static char *mc_name(struct be_adapter *adapter)
4633{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304634 char *str = ""; /* default */
4635
4636 switch (adapter->mc_type) {
4637 case UMC:
4638 str = "UMC";
4639 break;
4640 case FLEX10:
4641 str = "FLEX10";
4642 break;
4643 case vNIC1:
4644 str = "vNIC-1";
4645 break;
4646 case nPAR:
4647 str = "nPAR";
4648 break;
4649 case UFP:
4650 str = "UFP";
4651 break;
4652 case vNIC2:
4653 str = "vNIC-2";
4654 break;
4655 default:
4656 str = "";
4657 }
4658
4659 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004660}
4661
/* Tell whether this PCI function is the physical function or a VF */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4666
/* PCI probe: bring up one BE NIC function.
 *
 * Sequence: enable the PCI device and claim its regions, allocate the
 * netdev (with the adapter state in its private area), configure DMA
 * masks, initialize the control path, sync with firmware, optionally
 * issue a function-level reset, set up the data path (be_setup()) and
 * finally register the netdev.
 *
 * On failure, the goto ladder at the bottom unwinds exactly the steps
 * completed so far, in reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	/* Wire up the adapter <-> netdev <-> pdev back-pointers */
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA addressing; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER reporting is enabled on the PF only; failure is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already in use (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	/* Periodic worker and recovery task are armed below, after setup */
	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	/* Expose this function to the RoCE driver, if present */
	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4788
/* PM suspend handler: quiesce the NIC and power down the PCI function.
 * All HW resources are torn down (be_clear()) so that be_resume() can
 * rebuild them from scratch.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Arm wake-on-LAN in FW before powering down, if enabled */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop the recovery task before dismantling the device state */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4813
4814static int be_resume(struct pci_dev *pdev)
4815{
4816 int status = 0;
4817 struct be_adapter *adapter = pci_get_drvdata(pdev);
4818 struct net_device *netdev = adapter->netdev;
4819
4820 netif_device_detach(netdev);
4821
4822 status = pci_enable_device(pdev);
4823 if (status)
4824 return status;
4825
Yijing Wang1ca01512013-06-27 20:53:42 +08004826 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004827 pci_restore_state(pdev);
4828
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304829 status = be_fw_wait_ready(adapter);
4830 if (status)
4831 return status;
4832
Ajit Khaparded4360d62013-11-22 12:51:09 -06004833 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00004834 /* tell fw we're ready to fire cmds */
4835 status = be_cmd_fw_init(adapter);
4836 if (status)
4837 return status;
4838
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004839 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004840 if (netif_running(netdev)) {
4841 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004842 be_open(netdev);
4843 rtnl_unlock();
4844 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004845
4846 schedule_delayed_work(&adapter->func_recovery_work,
4847 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004848 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004849
Suresh Reddy76a9e082014-01-15 13:23:40 +05304850 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004851 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004852
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004853 return 0;
4854}
4855
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop all deferred work before quiescing the HW */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset halts any in-flight DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4875
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * and tell the EEH core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if this callback fires repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4914
/* EEH callback: slot reset has completed. Re-enable the device, restore
 * PCI config state and wait for FW readiness before reporting recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4941
/* EEH callback: traffic may flow again. Re-initialize the function and
 * bring the interface back up; on any failure just log and bail, leaving
 * the netdev detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the recovery task cancelled in be_eeh_err_detected() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4978
/* PCI error (EEH) recovery entry points for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4984
/* PCI driver descriptor: probe/remove, legacy PM suspend/resume,
 * shutdown and EEH error-recovery callbacks.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4995
4996static int __init be_init_module(void)
4997{
Joe Perches8e95a202009-12-03 07:58:21 +00004998 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4999 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005000 printk(KERN_WARNING DRV_NAME
5001 " : Module param rx_frag_size must be 2048/4096/8192."
5002 " Using 2048\n");
5003 rx_frag_size = 2048;
5004 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005005
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005006 return pci_register_driver(&be_driver);
5007}
5008module_init(be_init_module);
5009
/* Module teardown: unregister the driver from the PCI core */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);