blob: 3f04356afa821b589283aecf7954542dbfcaf9ef [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Sathya Perla6b7c5b92009-03-11 23:32:03 -070042static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: maps each bit position of the low Unrecoverable Error
 * status register to the name of the HW block that raised it, for logging.
 * (Trailing spaces in some entries are preserved as-is.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: maps each bit position of the high Unrecoverable Error
 * status register to the name of the HW block that raised it, for logging.
 * Unused/reserved bits are labeled "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* ndo_set_mac_address handler: program a new MAC into the adapter.
 * The sequence (add new PMAC -> delete old PMAC -> query active MAC) is
 * deliberate: the PMAC_ADD may legitimately fail for a VF, so the final
 * FW query decides whether the change actually took effect.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	/* Remember the currently programmed PMAC so it can be deleted once
	 * the new one is in place.
	 */
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* FW confirmed the new MAC is active; reflect it in the netdev */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000345 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000377 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000378 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000379 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
Sathya Perlaca34fe32012-11-06 17:48:56 +0000389static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
Ajit Khaparde61000862013-10-03 16:16:33 -0500435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530479 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500487}
488
Selvin Xavier005d5692011-05-16 07:36:35 +0000489static void populate_lancer_stats(struct be_adapter *adapter)
490{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000491
Selvin Xavier005d5692011-05-16 07:36:35 +0000492 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000494
495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000499 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000500 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000501 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505 drvs->rx_dropped_tcp_length =
506 pport_stats->rx_dropped_invalid_tcp_length;
507 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510 drvs->rx_dropped_header_too_small =
511 pport_stats->rx_dropped_header_too_small;
512 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000513 drvs->rx_address_filtered =
514 pport_stats->rx_address_filtered +
515 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000516 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000517 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000518 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000520 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 drvs->forwarded_packets = pport_stats->num_forwards_lo;
522 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000523 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000525}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet/byte counters
 * and the driver's error counters into @stats. Per-queue counters are read
 * under a u64_stats retry loop so 64-bit values are sampled consistently
 * on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Populate the per-packet header WRB that precedes the data WRBs.
 * Encodes the requested offloads (LSO, TCP/UDP csum), VLAN tag
 * insertion, and the total WRB count and byte length of this transmit.
 * @skip_hw_vlan: ask HW not to insert a VLAN tag (the driver has
 * already inlined the tag into the packet, if one was needed).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is set for IPv6 TSO only on non-Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* For encapsulated pkts the L4 proto comes from the inner
		 * header; ipcs is additionally set (presumably enabling IP
		 * csum offload for the tunnel case — confirm vs HW spec)
		 */
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	/* Tag value may have its priority bits rewritten; see
	 * be_get_tx_vlan_tag()
	 */
	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* DMA-map the skb and fill the tx queue with one header WRB, one data
 * WRB per mapped piece (linear head, then page frags) and, if
 * requested, a trailing dummy WRB. Returns the number of payload bytes
 * queued, or 0 on a DMA-mapping error — in which case every mapping
 * made so far is undone and txq->head is restored, so the caller sees
 * an untouched queue.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;	/* true once the linear part is mapped */
	u16 map_head;

	/* Reserve the header WRB now; it is filled last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	/* Map the linear part of the skb, if it carries any data */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB to make the WRB count even (BE2/3) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and unmap everything queued so
	 * far; only the first unmap (if any) was a single mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Inline the tx VLAN tag(s) into the packet data itself (SW tagging).
 * Inserts the stack-supplied tag — or the pvid, after a qnq async event
 * — and, in qnq mode, the outer qnq vid as well. May set *skip_hw_vlan
 * so the WRB tells HW not to add its own tag on top. Returns NULL if
 * the skb could not be unshared or re-tagged (caller must treat the
 * pkt as consumed/dropped).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now in the pkt data; clear the out-of-band tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* BEx/Lancer tx-path erratum workarounds: trim the padding of short
 * IPv4 pkts (padded pkts get a mangled tot_len / bad csum), decide when
 * HW VLAN tagging must be skipped, inline VLAN tags in SW where HW
 * tagging would corrupt csums or lock up the ASIC, and drop pkts that
 * cannot be made safe. Returns the (possibly re-allocated) skb, or
 * NULL if the pkt was dropped/freed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* Drop the trailing pad by trimming to hdr + IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);	/* pkt is freed here; err path must not */
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply chip-specific workarounds, map the skb
 * into tx WRBs and ring the doorbell. Always returns NETDEV_TX_OK; on
 * failure the skb is dropped (counted in tx_drv_drops), never requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* slot of the header WRB / sent_skb entry */

	/* May pad, re-tag, re-allocate or drop (NULL) the skb */
	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 indicates a DMA-mapping failure inside */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
		    txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* Mapping failed: make_tx_wrbs already unmapped; rewind
		 * the queue head and drop the pkt
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* Push the driver's current vid set (adapter->vids) to the f/w VLAN
 * filter. Falls back to VLAN promiscuous mode when the set exceeds
 * be_max_vlans() or the f/w reports insufficient resources; conversely,
 * clears VLAN-promisc once HW filtering succeeds again. Returns the
 * f/w command status (0 on success).
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in VLAN promisc mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1146
Patrick McHardy80d5c362013-04-19 02:04:28 +00001147static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148{
1149 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001150 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001152 /* Packets with VID 0 are always received by Lancer by default */
1153 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301154 return status;
1155
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301156 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301157 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001158
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301159 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301160 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001161
Somnath Kotura6b74e02014-01-21 15:50:55 +05301162 status = be_vid_config(adapter);
1163 if (status) {
1164 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301165 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301166 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301167
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001168 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169}
1170
Patrick McHardy80d5c362013-04-19 02:04:28 +00001171static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172{
1173 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001174 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001175
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
1178 goto ret;
1179
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301180 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301181 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001182 if (!status)
1183 adapter->vlans_added--;
1184 else
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301185 set_bit(vid, adapter->vids);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001186ret:
1187 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001188}
1189
/* Exit promiscuous mode: clear the driver's promisc state (including
 * the implied VLAN-promisc flag) and tell the f/w to turn promiscuous
 * rx filtering off.
 */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
1197
/* ndo_set_rx_mode handler: program the f/w rx filter to match the
 * netdev's flags and its unicast/multicast address lists. Falls back
 * to promiscuous (or all-multicast) filtering when the requested lists
 * exceed what the HW supports.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* Re-program the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC table with the netdev's uc list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush all previously-added secondary uc MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many uc addrs for the HW table: go promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev,
			 "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev,
			 "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1259
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On BEx chips the old pmac entry is deleted and a new one added; other
 * chips use a single set_mac command. Returns -EPERM if SR-IOV is not
 * enabled, -EINVAL for a bad MAC or vf index, else the f/w status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* vf + 1: f/w domain ids are 1-based (0 is the PF) — note the
	 * same convention is used by the other VF cmds in this file
	 */
	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		/* Cache the programmed MAC in the per-VF config */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1291
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001292static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301293 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001294{
1295 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001296 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001297
Sathya Perla11ac75e2011-12-13 00:58:50 +00001298 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001299 return -EPERM;
1300
Sathya Perla11ac75e2011-12-13 00:58:50 +00001301 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001302 return -EINVAL;
1303
1304 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001305 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001306 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1307 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001308 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301309 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001310
1311 return 0;
1312}
1313
Sathya Perla748b5392014-05-09 13:29:13 +05301314static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001315{
1316 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001317 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001318 int status = 0;
1319
Sathya Perla11ac75e2011-12-13 00:58:50 +00001320 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001321 return -EPERM;
1322
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001323 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001324 return -EINVAL;
1325
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001326 if (vlan || qos) {
1327 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301328 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001329 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1330 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001331 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001332 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301333 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1334 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001335 }
1336
Somnath Koturc5022242014-03-03 14:24:20 +05301337 if (!status)
1338 vf_cfg->vlan_tag = vlan;
1339 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001340 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301341 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001342 return status;
1343}
1344
Sathya Perla748b5392014-05-09 13:29:13 +05301345static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001346{
1347 struct be_adapter *adapter = netdev_priv(netdev);
1348 int status = 0;
1349
Sathya Perla11ac75e2011-12-13 00:58:50 +00001350 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001351 return -EPERM;
1352
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001353 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001354 return -EINVAL;
1355
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001356 if (rate < 100 || rate > 10000) {
1357 dev_err(&adapter->pdev->dev,
1358 "tx rate must be between 100 and 10000 Mbps\n");
1359 return -EINVAL;
1360 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001361
Sathya Perlaa4018012014-03-27 10:46:18 +05301362 status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001363 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001364 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301365 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001366 else
1367 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001368 return status;
1369}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301370static int be_set_vf_link_state(struct net_device *netdev, int vf,
1371 int link_state)
1372{
1373 struct be_adapter *adapter = netdev_priv(netdev);
1374 int status;
1375
1376 if (!sriov_enabled(adapter))
1377 return -EPERM;
1378
1379 if (vf >= adapter->num_vfs)
1380 return -EINVAL;
1381
1382 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1383 if (!status)
1384 adapter->vf_cfg[vf].plink_tracking = link_state;
1385
1386 return status;
1387}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001388
Sathya Perla2632baf2013-10-01 16:00:00 +05301389static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1390 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001391{
Sathya Perla2632baf2013-10-01 16:00:00 +05301392 aic->rx_pkts_prev = rx_pkts;
1393 aic->tx_reqs_prev = tx_pkts;
1394 aic->jiffies = now;
1395}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001396
Sathya Perla2632baf2013-10-01 16:00:00 +05301397static void be_eqd_update(struct be_adapter *adapter)
1398{
1399 struct be_set_eqd set_eqd[MAX_EVT_QS];
1400 int eqd, i, num = 0, start;
1401 struct be_aic_obj *aic;
1402 struct be_eq_obj *eqo;
1403 struct be_rx_obj *rxo;
1404 struct be_tx_obj *txo;
1405 u64 rx_pkts, tx_pkts;
1406 ulong now;
1407 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001408
Sathya Perla2632baf2013-10-01 16:00:00 +05301409 for_all_evt_queues(adapter, eqo, i) {
1410 aic = &adapter->aic_obj[eqo->idx];
1411 if (!aic->enable) {
1412 if (aic->jiffies)
1413 aic->jiffies = 0;
1414 eqd = aic->et_eqd;
1415 goto modify_eqd;
1416 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417
Sathya Perla2632baf2013-10-01 16:00:00 +05301418 rxo = &adapter->rx_obj[eqo->idx];
1419 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001420 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301421 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001422 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001423
Sathya Perla2632baf2013-10-01 16:00:00 +05301424 txo = &adapter->tx_obj[eqo->idx];
1425 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001426 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301427 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001428 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001429
Sathya Perla4097f662009-03-24 16:40:13 -07001430
Sathya Perla2632baf2013-10-01 16:00:00 +05301431 /* Skip, if wrapped around or first calculation */
1432 now = jiffies;
1433 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1434 rx_pkts < aic->rx_pkts_prev ||
1435 tx_pkts < aic->tx_reqs_prev) {
1436 be_aic_update(aic, rx_pkts, tx_pkts, now);
1437 continue;
1438 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001439
Sathya Perla2632baf2013-10-01 16:00:00 +05301440 delta = jiffies_to_msecs(now - aic->jiffies);
1441 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1442 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1443 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001444
Sathya Perla2632baf2013-10-01 16:00:00 +05301445 if (eqd < 8)
1446 eqd = 0;
1447 eqd = min_t(u32, eqd, aic->max_eqd);
1448 eqd = max_t(u32, eqd, aic->min_eqd);
1449
1450 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001451modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301452 if (eqd != aic->prev_eqd) {
1453 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1454 set_eqd[num].eq_id = eqo->q.id;
1455 aic->prev_eqd = eqd;
1456 num++;
1457 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001458 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301459
1460 if (num)
1461 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001462}
1463
Sathya Perla3abcded2010-10-03 22:12:27 -07001464static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301465 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001466{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001467 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001468
Sathya Perlaab1594e2011-07-25 19:10:15 +00001469 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001470 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001471 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001472 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001473 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001474 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001475 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001476 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001477 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478}
1479
Sathya Perla2e588f82011-03-11 02:49:26 +00001480static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001481{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001482 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301483 * Also ignore ipcksm for ipv6 pkts
1484 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001485 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301486 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001487}
1488
/* Pop the page_info for the fragment at the RX queue tail and make its
 * data CPU-visible: the page backing the last fragment of a big page is
 * fully DMA-unmapped, while intermediate fragments are only synced for
 * CPU access (the mapping stays live for the remaining fragments).
 * Advances the queue tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* a posted buffer must always have a page attached */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* last fragment of the big page: tear down the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* fragment in the middle of the page: just sync this chunk */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1514
1515/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001516static void be_rx_compl_discard(struct be_rx_obj *rxo,
1517 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001518{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001519 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001520 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001522 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301523 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001524 put_page(page_info->page);
1525 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526 }
1527}
1528
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment is copied (fully, for tiny
 * packets; headers only, otherwise) into the skb's linear area and the
 * remaining data is attached as page fragments, coalescing consecutive
 * fragments that live in the same physical page into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the ethernet header to the linear area; the
		 * rest of the first fragment becomes frag[0]
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* ownership of the page moved to the skb (or was dropped) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* single-fragment packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: drop the extra ref
			 * taken when the buffer was posted
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1603
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX fragments, set checksum/hash/
 * vlan metadata and hand it to the network stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no memory for an skb: count the drop and free the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* trust the HW checksum only if RXCSUM is on and csum_passed() */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1639
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX page fragments directly to a GRO skb (no copy into a
 * linear area) and feed it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no GRO skb available: drop the completion's fragments */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps in u16); the first iteration always takes
	 * the "fresh page" branch below and increments it to slot 0
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for packets that passed HW checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1697
/* Decode a v1 RX completion descriptor (used when adapter->be3_native is
 * set; see be_rx_compl_get()) into the chip-independent be_rx_compl_info
 * consumed by the rest of the RX path.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are only extracted when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001729
/* Decode a v0 RX completion descriptor (used when adapter->be3_native is
 * not set; see be_rx_compl_get()) into the chip-independent
 * be_rx_compl_info. Unlike the v1 parser this also extracts the ip_frag
 * bit, which be_rx_compl_get() uses to invalidate l4_csum.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only extracted when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1761
/* Fetch and parse the next RX completion from rxo's completion queue.
 * Returns NULL when no completion is pending. The parsed result lives
 * in rxo->rxcp (a single per-queue buffer, overwritten on each call).
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the rest of the descriptor only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* tag is byte-swapped by HW on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* ignore the pvid tag unless it was explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1806
Eric Dumazet1829b082011-03-01 05:48:12 +00001807static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001808{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001809 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001810
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001812 gfp |= __GFP_COMP;
1813 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814}
1815
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 *
 * Each "big page" is DMA-mapped once; individual rx_frag_size slices of it
 * are handed to the HW as separate RX descriptors. page_info->last_frag and
 * the dma_unmap_addr stored per-frag let the completion path know when the
 * whole page mapping can be unmapped. Posting stops when MAX_RX_POST frags
 * have been queued or the RXQ has no free slot (page_info->page != NULL).
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for device reads */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Reuse the current big page: take an extra ref for
			 * this frag and advance to the next slice
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Program the 64-bit frag DMA address into the descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* No: this frag is the page's last; remember the page
			 * base address so the completion path can unmap it
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new RX buffers are available */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1894
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * pending. The entry's valid bit is checked first; the rmb() ensures the
 * rest of the DMA'ed completion is read only after the valid bit is seen
 * set. The valid bit is then cleared (so the entry is not re-consumed on
 * wrap-around) and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Do not read the remaining dwords before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1910
/* Reclaim one transmitted skb: walk the TXQ from its tail up to and
 * including @last_index, unmapping the data WRBs, then free the skb.
 * The first WRB of every packet is a header WRB that carries no mapped
 * data of its own (only the skb head is unmapped once, on the first
 * data WRB). Returns the total number of WRBs consumed, including the
 * header WRB, so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb linear area only for the first data WRB */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* Safe from any context (hard-irq included) */
	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1942
/* Return the number of events in the event queue.
 * Consumes entries from the EQ tail until a zero (not-yet-written) entry
 * is found. Each consumed entry is zeroed so it is not counted again on
 * the next pass/wrap. The rmb() orders the read of the entry contents
 * after the non-zero evt check.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1962
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001963/* Leaves the EQ is disarmed state */
1964static void be_eq_clean(struct be_eq_obj *eqo)
1965{
1966 int num = events_get(eqo);
1967
1968 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1969}
1970
/* Drain the RX completion queue and free all posted-but-unused RX buffers.
 * Must only be called while the RXQ is being torn down; afterwards the CQ
 * is left unarmed and the ring head/tail are reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10 x 1ms waits or on HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* Zero num_rcvd identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2019
/* Reclaim all outstanding TX completions across every TX queue during
 * teardown. Polls until all TXQs are empty, HW has been silent for ~10ms
 * (timeo), or a HW error is detected. Any skbs still posted after the
 * polling phase will never get a completion, so they are freed directly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress made: restart the silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the WRB span of this skb to walk it */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2079
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002080static void be_evt_queues_destroy(struct be_adapter *adapter)
2081{
2082 struct be_eq_obj *eqo;
2083 int i;
2084
2085 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002086 if (eqo->q.created) {
2087 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002088 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302089 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302090 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002091 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002092 be_queue_free(adapter, &eqo->q);
2093 }
2094}
2095
/* Create the event queues: one per interrupt vector, capped by the
 * configured queue count. For each EQ, register its NAPI context,
 * initialize adaptive-interrupt-coalescing (AIC) state, allocate the
 * host ring and create the EQ in FW. Returns 0 or a negative errno
 * from the first failing step (caller is expected to clean up).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2129
Sathya Perla5fb379e2009-06-18 00:02:59 +00002130static void be_mcc_queues_destroy(struct be_adapter *adapter)
2131{
2132 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002133
Sathya Perla8788fdc2009-07-27 22:52:03 +00002134 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002135 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002136 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002137 be_queue_free(adapter, q);
2138
Sathya Perla8788fdc2009-07-27 22:52:03 +00002139 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002140 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002141 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002142 be_queue_free(adapter, q);
2143}
2144
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue (on the default EQ) and then the MCC
 * WRB queue on top of it. On any failure, previously created/allocated
 * resources are unwound via the goto chain. Returns 0 on success, -1 on
 * failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

/* Unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2177
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002178static void be_tx_queues_destroy(struct be_adapter *adapter)
2179{
2180 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002181 struct be_tx_obj *txo;
2182 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183
Sathya Perla3c8def92011-06-12 20:01:58 +00002184 for_all_tx_queues(adapter, txo, i) {
2185 q = &txo->q;
2186 if (q->created)
2187 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2188 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189
Sathya Perla3c8def92011-06-12 20:01:58 +00002190 q = &txo->cq;
2191 if (q->created)
2192 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2193 be_queue_free(adapter, q);
2194 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002195}
2196
/* Create the TX queues: for each TXQ, allocate and create its completion
 * queue (bound round-robin to an EQ), allocate the WRB ring, and create
 * the TXQ in FW. Returns 0 or the first failing step's status (caller
 * cleans up partially created queues).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2237
2238static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002239{
2240 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002241 struct be_rx_obj *rxo;
2242 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243
Sathya Perla3abcded2010-10-03 22:12:27 -07002244 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002245 q = &rxo->cq;
2246 if (q->created)
2247 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2248 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250}
2251
/* Create the RX completion queues: one RSS ring per EQ, plus one default
 * RXQ (for non-IP traffic) when RSS will actually be used. Each CQ is
 * bound round-robin to an EQ and created in FW. Returns 0 or the first
 * failing step's status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2288
/* Legacy (INTx) interrupt handler: count pending EQ events, schedule NAPI
 * and notify HW. Also tracks spurious interrupts so the kernel does not
 * disable the IRQ line for the occasional event-less assertion.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2320
/* MSI-X interrupt handler: acknowledge the EQ (without re-arming or
 * consuming events -- be_poll does that) and kick NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2329
Sathya Perla2e588f82011-03-11 02:49:26 +00002330static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002331{
Somnath Koture38b1702013-05-29 22:55:56 +00002332 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002333}
2334
/* Process up to @budget RX completions on @rxo. @polling distinguishes
 * NAPI context from busy-poll context (GRO is skipped for the latter).
 * Returns the number of completions consumed; also notifies the CQ and
 * replenishes RX buffers when the ring runs low.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2390
/* Process up to @budget TX completions on @txo (netdev subqueue @idx).
 * Frees completed skbs, notifies the CQ, wakes the subqueue if it was
 * stopped for lack of WRBs and the ring has drained below half, and
 * updates the per-queue completion stats. Returns true when the queue
 * is fully drained (work_done < budget), false when budget was exhausted.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002424
/* NAPI poll handler for one EQ: services all TX queues mapped to this EQ,
 * then (if not contended with busy-poll) all RX queues on it, and MCC
 * completions for the EQ that owns them. Re-arms the EQ only when all
 * work fit in @budget; otherwise leaves it unarmed so polling continues.
 * Returns the amount of work done (budget when polling must continue).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	/* Count & clear events before processing so they are acked below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RX rings; ask to be polled again */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2469
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) handler: tries to grab the EQ's busy-poll lock
 * and processes a small batch (up to 4) of RX completions from the first
 * ring that has any. Returns LL_FLUSH_BUSY if NAPI currently owns the
 * rings, otherwise the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2491
/* Poll the adapter for unrecoverable-error conditions.
 * Lancer chips report errors via the SLIPORT status registers (BAR-mapped);
 * all other chips report Unrecoverable Errors (UE) via PCI config space.
 * On a confirmed error the carrier is turned off; adapter->hw_error is set
 * only where the HW must be considered dead (Lancer non-FW-reset errors,
 * and UEs on Skyhawk).
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		/* BE2/BE3/Skyhawk: UE status and per-bit mask registers
		 * live in PCI config space
		 */
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked bits are expected/ignorable; keep only real UEs */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2567
Sathya Perla8d56ff12009-11-22 22:02:26 +00002568static void be_msix_disable(struct be_adapter *adapter)
2569{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002570 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002571 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002572 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302573 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002574 }
2575}
2576
/* Negotiate MSI-X vectors with the PCI core.
 * On success the granted vectors are split between NIC and RoCE (half each,
 * when RoCE is supported and more than the minimum was granted).
 * Returns 0 on success. On failure, returns 0 for PFs (probe continues with
 * INTx) but the negative error for VFs, since VFs cannot fall back to INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	/* Whatever is not reserved for RoCE belongs to the NIC queues */
	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2620
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002621static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302622 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002623{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302624 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002625}
2626
/* Request one IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs acquired so far (in reverse order) and
 * disables MSI-X before returning the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the vectors that were successfully requested
	 * (i is the index of the EQ whose request_irq() failed)
	 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2650
/* Register the adapter's interrupt handler(s).
 * Prefers MSI-X; PFs fall back to a single shared INTx line on MSI-X
 * registration failure, while VFs (no INTx support) fail outright.
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2678
/* Free whatever IRQs be_irq_register() acquired: either the single shared
 * INTx line or one MSI-X vector per event queue. Safe to call when nothing
 * was registered (returns early via isr_registered).
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2701
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002702static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002703{
2704 struct be_queue_info *q;
2705 struct be_rx_obj *rxo;
2706 int i;
2707
2708 for_all_rx_queues(adapter, rxo, i) {
2709 q = &rxo->q;
2710 if (q->created) {
2711 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002712 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002713 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002714 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002715 }
2716}
2717
/* ndo_stop handler: quiesce and tear down the data path.
 * Ordering matters: NAPI/busy-poll are disabled first, TX is drained before
 * RX rings are destroyed, and IRQs are synchronized/unregistered last.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the unicast MAC filters programmed beyond the primary
	 * (slot 0 is the primary MAC and is kept)
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Ensure no handler is still running on any vector before
	 * cleaning the event queues
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2767
/* Allocate and create all RX rings in FW, program the RSS indirection
 * table and a random hash key (when more than the default queue exists),
 * then post the initial receive buffers.
 * Returns 0 on success or a negative error from queue/RSS setup.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * round-robin across all RSS_INDIR_TABLE_LEN slots
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
			j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is only supported on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		/* Keep software state consistent with HW (RSS is off) */
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key actually programmed, for ethtool reporting */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2833
/* ndo_open handler: build the RX path, register IRQs, arm all CQs/EQs,
 * enable NAPI and busy-poll, report link state and start the TX queues.
 * Any failure rolls everything back through be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm RX and TX completion queues so they raise events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn currently-open VxLAN UDP ports for RX offload setup */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2883
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002884static int be_setup_wol(struct be_adapter *adapter, bool enable)
2885{
2886 struct be_dma_mem cmd;
2887 int status = 0;
2888 u8 mac[ETH_ALEN];
2889
2890 memset(mac, 0, ETH_ALEN);
2891
2892 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002893 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2894 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002895 if (cmd.va == NULL)
2896 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002897
2898 if (enable) {
2899 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302900 PCICFG_PM_CONTROL_OFFSET,
2901 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002902 if (status) {
2903 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002904 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002905 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2906 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002907 return status;
2908 }
2909 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302910 adapter->netdev->dev_addr,
2911 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002912 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2913 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2914 } else {
2915 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2916 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2917 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2918 }
2919
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002920 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002921 return status;
2922}
2923
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs MACs via pmac_add; newer chips use set_mac */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A per-VF failure is only logged; the loop continues, so
		 * the return value reflects the status of the last VF only
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2959
Sathya Perla4c876612013-02-03 20:30:11 +00002960static int be_vfs_mac_query(struct be_adapter *adapter)
2961{
2962 int status, vf;
2963 u8 mac[ETH_ALEN];
2964 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002965
2966 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302967 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2968 mac, vf_cfg->if_handle,
2969 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002970 if (status)
2971 return status;
2972 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2973 }
2974 return 0;
2975}
2976
/* Undo SR-IOV setup: disable SR-IOV, delete each VF's MAC and interface in
 * FW, and free the per-VF config array. If any VF is still assigned to a VM,
 * the FW-side teardown is skipped (only host memory is released) to avoid
 * pulling resources out from under a running guest.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal mirrors how it was programmed: pmac_del on
		 * BEx, set_mac(NULL) on newer chips
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
3004
/* Destroy all adapter queues. Order is MCC, RX CQs, TX queues, then the
 * event queues last (EQs are destroyed after the CQs that feed them).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3012
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303013static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003014{
Sathya Perla191eb752012-02-23 18:50:13 +00003015 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3016 cancel_delayed_work_sync(&adapter->work);
3017 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3018 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303019}
3020
Somnath Koturb05004a2013-12-05 12:08:16 +05303021static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303022{
3023 int i;
3024
Somnath Koturb05004a2013-12-05 12:08:16 +05303025 if (adapter->pmac_id) {
3026 for (i = 0; i < (adapter->uc_macs + 1); i++)
3027 be_cmd_pmac_del(adapter, adapter->if_handle,
3028 adapter->pmac_id[i], 0);
3029 adapter->uc_macs = 0;
3030
3031 kfree(adapter->pmac_id);
3032 adapter->pmac_id = NULL;
3033 }
3034}
3035
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303036#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN RX offload state: convert the interface back from tunnel
 * mode, clear the offload port in FW, and reset the driver's bookkeeping.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	/* Port 0 tells the FW no VxLAN port is being offloaded */
	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303049#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303050
/* Top-level teardown, the inverse of setup: stop the worker, clear VFs and
 * VxLAN offloads, remove MAC filters, destroy the interface and all queues,
 * and finally release MSI-X. Clears BE_FLAGS_SETUP_DONE so re-entrant
 * teardown paths (e.g. be_close after EEH) become no-ops.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3072
/* Create a FW interface for every VF. Capability flags default to
 * untagged/broadcast/multicast; on non-BE3 chips they are refined from the
 * per-VF FW resource profile when one is available.
 * Returns 0, or the first interface-creation error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Non-BE3: query the FW profile; a failed query is not
		 * fatal, the default cap_flags are used instead
		 */
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3104
Sathya Perla39f1d942012-05-08 19:41:24 +00003105static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003106{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003107 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003108 int vf;
3109
Sathya Perla39f1d942012-05-08 19:41:24 +00003110 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3111 GFP_KERNEL);
3112 if (!adapter->vf_cfg)
3113 return -ENOMEM;
3114
Sathya Perla11ac75e2011-12-13 00:58:50 +00003115 for_all_vfs(adapter, vf_cfg, vf) {
3116 vf_cfg->if_handle = -1;
3117 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003118 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003119 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003120}
3121
/* Enable SR-IOV and provision each VF: interface handle, MAC address,
 * filtering privileges, QoS and logical link state.
 * Returns 0 on success; on any failure all VF state is torn down via
 * be_vf_clear() before returning the error.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	/* VFs may already be enabled in HW (e.g. the driver was reloaded
	 * while SR-IOV stayed on); in that case reuse the existing count
	 * and ignore the num_vfs module parameter.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		/* No VFs requested: nothing to do, not an error */
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* With pre-existing VFs the FW interfaces already exist; just query
	 * their ids. Otherwise create a fresh interface for each VF.
	 */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise for MACs: query existing ones, or program new addresses */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_config_qos(adapter, 1000, vf + 1);

		/* Cache the current link speed as this VF's tx_rate */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	/* Finally flip SR-IOV on at the PCI level (skip if already on) */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3217
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303218/* Converting function_mode bits on BE3 to SH mc_type enums */
3219
3220static u8 be_convert_mc_type(u32 function_mode)
3221{
3222 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3223 return vNIC1;
3224 else if (function_mode & FLEX10_MODE)
3225 return FLEX10;
3226 else if (function_mode & VNIC_MODE)
3227 return vNIC2;
3228 else if (function_mode & UMC_ENABLED)
3229 return UMC;
3230 else
3231 return MC_NONE;
3232}
3233
/* On BE2/BE3 FW does not suggest the supported limits */
/* Fill in *res with the resource limits (queues, MACs, VLANs, VFs) this
 * function may use. BE2/BE3 FW does not report most limits, so they are
 * derived from chip type, PF/VF role, multi-channel mode and SR-IOV use.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs = 0;

	if (be_physfn(adapter) && BE3_chip(adapter)) {
		be_cmd_get_profile_config(adapter, res, 0);
		/* Some old versions of BE3 FW don't report max_vfs value */
		if (res->max_vfs == 0) {
			max_vfs = pci_sriov_get_totalvfs(pdev);
			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		}
		use_sriov = res->max_vfs && sriov_want(adapter);
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* NOTE(review): the +1 appears to account for the default
	 * (non-RSS) RX queue — confirm against be_rx_cqs_create().
	 */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3303
Sathya Perla30128032011-11-10 19:17:57 +00003304static void be_setup_init(struct be_adapter *adapter)
3305{
3306 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003307 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003308 adapter->if_handle = -1;
3309 adapter->be3_native = false;
3310 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003311 if (be_physfn(adapter))
3312 adapter->cmd_privileges = MAX_PRIVILEGES;
3313 else
3314 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003315}
3316
/* Populate adapter->res with per-function resource limits: computed
 * locally for BE2/BE3, queried from FW for Lancer/Skyhawk.
 * Returns 0 on success or a FW-command error code.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the PF can read the PF-pool limit for max_vfs */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3360
/* Routine to query per function resource limits */
/* Query FW configuration (port, function mode/caps, ASIC rev and active
 * profile), derive resource limits, and allocate the pmac_id table sized
 * to the unicast-MAC limit. Returns 0 on success or -ENOMEM / FW error.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* Profile query is PF-only; failure here is non-fatal (info log) */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3395
Sathya Perla95046b92013-07-23 15:25:02 +05303396static int be_mac_setup(struct be_adapter *adapter)
3397{
3398 u8 mac[ETH_ALEN];
3399 int status;
3400
3401 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3402 status = be_cmd_get_perm_mac(adapter, mac);
3403 if (status)
3404 return status;
3405
3406 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3407 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3408 } else {
3409 /* Maybe the HW was reset; dev_addr must be re-programmed */
3410 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3411 }
3412
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003413 /* For BE3-R VFs, the PF programs the initial MAC address */
3414 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3415 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3416 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303417 return 0;
3418}
3419
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303420static void be_schedule_worker(struct be_adapter *adapter)
3421{
3422 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3423 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3424}
3425
/* Create all HW queues in dependency order (EQs first, then TX, RX-CQs
 * and MCC queues) and publish the real queue counts to the net stack.
 * On any failure a single error is logged and the FW status returned;
 * the caller is responsible for teardown.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Callers must hold rtnl_lock for these (see be_setup()) */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3460
/* Tear down and re-create all queues (and, when possible, the MSI-X
 * vector set) so a changed queue configuration takes effect; the netdev
 * is closed around the operation and re-opened if it was running.
 * Returns 0 or the first error from re-setup/re-open.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-X only if it was actually disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3496
/* Top-level adapter bring-up: query config, enable MSI-X, create the
 * FW interface and all queues, program the MAC, apply VLAN/RX-mode and
 * flow-control settings, optionally set up SR-IOV VFs, and start the
 * periodic worker. On any fatal error everything is undone via
 * be_clear() and the error is returned.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the capability flags we want AND the i/f supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Warn (but continue) on known-problematic old BE2 firmware */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program any VLANs that existed before this (re-)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Push desired flow-control settings only if they differ from HW */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3582
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: for every event queue, re-arm/notify the EQ doorbell and
 * schedule its NAPI context so completions are processed without relying
 * on normal interrupt delivery (netconsole/kgdboe path).
 * Fix: dropped the redundant bare "return;" at the end of this void
 * function (flagged by checkpatch).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3598
/* Signature string used in UFI firmware file headers */
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Two 16-byte halves of the flash-directory cookie; compared as one
 * 32-byte blob (including the NUL padding of the first half) against
 * fsec->cookie in get_fsec_info().
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003601
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003602static bool be_flash_redboot(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303603 const u8 *p, u32 img_start, int image_size,
3604 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003605{
3606 u32 crc_offset;
3607 u8 flashed_crc[4];
3608 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003609
3610 crc_offset = hdr_size + img_start + image_size - 4;
3611
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003612 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003613
Sathya Perla748b5392014-05-09 13:29:13 +05303614 status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003615 if (status) {
3616 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303617 "could not get crc from flash, not flashing redboot\n");
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003618 return false;
3619 }
3620
3621 /*update redboot only if crc does not match*/
3622 if (!memcmp(flashed_crc, p, 4))
3623 return false;
3624 else
3625 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003626}
3627
Sathya Perla306f1342011-08-02 19:57:45 +00003628static bool phy_flashing_required(struct be_adapter *adapter)
3629{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003630 return (adapter->phy.phy_type == TN_8022 &&
3631 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003632}
3633
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003634static bool is_comp_in_ufi(struct be_adapter *adapter,
3635 struct flash_section_info *fsec, int type)
3636{
3637 int i = 0, img_type = 0;
3638 struct flash_section_info_g2 *fsec_g2 = NULL;
3639
Sathya Perlaca34fe32012-11-06 17:48:56 +00003640 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003641 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3642
3643 for (i = 0; i < MAX_FLASH_COMP; i++) {
3644 if (fsec_g2)
3645 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3646 else
3647 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3648
3649 if (img_type == type)
3650 return true;
3651 }
3652 return false;
3653
3654}
3655
Jingoo Han4188e7d2013-08-05 18:02:02 +09003656static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303657 int header_size,
3658 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003659{
3660 struct flash_section_info *fsec = NULL;
3661 const u8 *p = fw->data;
3662
3663 p += header_size;
3664 while (p < (fw->data + fw->size)) {
3665 fsec = (struct flash_section_info *)p;
3666 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3667 return fsec;
3668 p += 32;
3669 }
3670 return NULL;
3671}
3672
/* Write one firmware component ('img', 'img_size' bytes) to flash in
 * 32KB chunks through the pre-allocated DMA buffer 'flash_cmd'.
 * All chunks but the last use a SAVE op; the final chunk uses a FLASH
 * op (presumably committing the staged data — per the FW opcode names).
 * Returns 0 on success or the FW status on failure.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* At most 32KB per FW command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Pick SAVE vs FLASH (and the PHY-FW variants) depending on
		 * whether this is the last chunk.
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			/* FW rejecting a PHY-FW write with ILLEGAL_IOCTL_REQ
			 * is treated as "not applicable", not as an error.
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3713
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003714/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003715static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303716 const struct firmware *fw,
3717 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003718{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003719 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003720 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003721 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003722 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003723 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003724 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003725
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003726 struct flash_comp gen3_flash_types[] = {
3727 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3728 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3729 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3730 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3731 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3732 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3733 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3734 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3735 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3736 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3737 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3738 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3739 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3740 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3741 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3742 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3743 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3744 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3745 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3746 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003747 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003748
3749 struct flash_comp gen2_flash_types[] = {
3750 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3751 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3752 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3753 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3754 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3755 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3756 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3757 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3758 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3759 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3760 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3761 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3762 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3763 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3764 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3765 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003766 };
3767
Sathya Perlaca34fe32012-11-06 17:48:56 +00003768 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003769 pflashcomp = gen3_flash_types;
3770 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003771 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003772 } else {
3773 pflashcomp = gen2_flash_types;
3774 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003775 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003776 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003777
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003778 /* Get flash section info*/
3779 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3780 if (!fsec) {
3781 dev_err(&adapter->pdev->dev,
3782 "Invalid Cookie. UFI corrupted ?\n");
3783 return -1;
3784 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003785 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003786 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003787 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003788
3789 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3790 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3791 continue;
3792
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003793 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3794 !phy_flashing_required(adapter))
3795 continue;
3796
3797 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3798 redboot = be_flash_redboot(adapter, fw->data,
Sathya Perla748b5392014-05-09 13:29:13 +05303799 pflashcomp[i].offset,
3800 pflashcomp[i].size,
3801 filehdr_size +
3802 img_hdrs_size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003803 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003804 continue;
3805 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003806
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003807 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003808 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003809 if (p + pflashcomp[i].size > fw->data + fw->size)
3810 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003811
3812 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303813 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003814 if (status) {
3815 dev_err(&adapter->pdev->dev,
3816 "Flashing section type %d failed.\n",
3817 pflashcomp[i].img_type);
3818 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003819 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003820 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003821 return 0;
3822}
3823
/* Flash firmware components on Skyhawk adapters. Unlike BE2/BE3, the
 * component list is driven entirely by the section directory embedded
 * in the UFI file: each entry's type is mapped to a flash op-type,
 * bounds-checked and written via be_flash(). Unknown section types are
 * skipped; boot code is flashed only if its CRC differs from flash.
 * Returns 0, -1 on a malformed file, or the FW error.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		/* NOTE(review): size is read from the 'pad_size' field —
		 * confirm against the flash_section_entry definition.
		 */
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			/* Unrecognized section type: skip it */
			continue;
		}

		/* Boot code is flashed only when its CRC differs */
		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		/* Reject sections that run past the end of the file */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				fsec->fsec_entry[i].type);
			return status;
		}
	}
	return 0;
}
3896
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003897static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303898 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003899{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003900#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3901#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3902 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003903 const u8 *data_ptr = NULL;
3904 u8 *dest_image_ptr = NULL;
3905 size_t image_size = 0;
3906 u32 chunk_size = 0;
3907 u32 data_written = 0;
3908 u32 offset = 0;
3909 int status = 0;
3910 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003911 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003912
3913 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3914 dev_err(&adapter->pdev->dev,
3915 "FW Image not properly aligned. "
3916 "Length must be 4 byte aligned.\n");
3917 status = -EINVAL;
3918 goto lancer_fw_exit;
3919 }
3920
3921 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3922 + LANCER_FW_DOWNLOAD_CHUNK;
3923 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003924 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003925 if (!flash_cmd.va) {
3926 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003927 goto lancer_fw_exit;
3928 }
3929
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003930 dest_image_ptr = flash_cmd.va +
3931 sizeof(struct lancer_cmd_req_write_object);
3932 image_size = fw->size;
3933 data_ptr = fw->data;
3934
3935 while (image_size) {
3936 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3937
3938 /* Copy the image chunk content. */
3939 memcpy(dest_image_ptr, data_ptr, chunk_size);
3940
3941 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003942 chunk_size, offset,
3943 LANCER_FW_DOWNLOAD_LOCATION,
3944 &data_written, &change_status,
3945 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003946 if (status)
3947 break;
3948
3949 offset += data_written;
3950 data_ptr += data_written;
3951 image_size -= data_written;
3952 }
3953
3954 if (!status) {
3955 /* Commit the FW written */
3956 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003957 0, offset,
3958 LANCER_FW_DOWNLOAD_LOCATION,
3959 &data_written, &change_status,
3960 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003961 }
3962
3963 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
Sathya Perla748b5392014-05-09 13:29:13 +05303964 flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003965 if (status) {
3966 dev_err(&adapter->pdev->dev,
3967 "Firmware load error. "
3968 "Status code: 0x%x Additional Status: 0x%x\n",
3969 status, add_status);
3970 goto lancer_fw_exit;
3971 }
3972
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003973 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur4bebb562013-12-05 12:07:55 +05303974 dev_info(&adapter->pdev->dev,
3975 "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00003976 status = lancer_physdev_ctrl(adapter,
3977 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003978 if (status) {
3979 dev_err(&adapter->pdev->dev,
3980 "Adapter busy for FW reset.\n"
3981 "New FW will not be active.\n");
3982 goto lancer_fw_exit;
3983 }
3984 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Sathya Perla748b5392014-05-09 13:29:13 +05303985 dev_err(&adapter->pdev->dev,
3986 "System reboot required for new FW to be active\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003987 }
3988
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003989 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3990lancer_fw_exit:
3991 return status;
3992}
3993
Sathya Perlaca34fe32012-11-06 17:48:56 +00003994#define UFI_TYPE2 2
3995#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003996#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003997#define UFI_TYPE4 4
3998static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003999 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004000{
4001 if (fhdr == NULL)
4002 goto be_get_ufi_exit;
4003
Sathya Perlaca34fe32012-11-06 17:48:56 +00004004 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4005 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004006 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4007 if (fhdr->asic_type_rev == 0x10)
4008 return UFI_TYPE3R;
4009 else
4010 return UFI_TYPE3;
4011 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004012 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004013
4014be_get_ufi_exit:
4015 dev_err(&adapter->pdev->dev,
4016 "UFI and Interface are not compatible for flashing\n");
4017 return -1;
4018}
4019
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004020static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4021{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004022 struct flash_file_hdr_g3 *fhdr3;
4023 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004024 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004025 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004026 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004027
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004028 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004029 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4030 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00004031 if (!flash_cmd.va) {
4032 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004033 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004034 }
4035
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004036 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004037 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004038
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004039 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004040
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004041 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4042 for (i = 0; i < num_imgs; i++) {
4043 img_hdr_ptr = (struct image_hdr *)(fw->data +
4044 (sizeof(struct flash_file_hdr_g3) +
4045 i * sizeof(struct image_hdr)));
4046 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004047 switch (ufi_type) {
4048 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004049 status = be_flash_skyhawk(adapter, fw,
Sathya Perla748b5392014-05-09 13:29:13 +05304050 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004051 break;
4052 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00004053 status = be_flash_BEx(adapter, fw, &flash_cmd,
4054 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004055 break;
4056 case UFI_TYPE3:
4057 /* Do not flash this ufi on BE3-R cards */
4058 if (adapter->asic_rev < 0x10)
4059 status = be_flash_BEx(adapter, fw,
4060 &flash_cmd,
4061 num_imgs);
4062 else {
4063 status = -1;
4064 dev_err(&adapter->pdev->dev,
4065 "Can't load BE3 UFI on BE3R\n");
4066 }
4067 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004068 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004069 }
4070
Sathya Perlaca34fe32012-11-06 17:48:56 +00004071 if (ufi_type == UFI_TYPE2)
4072 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004073 else if (ufi_type == -1)
4074 status = -1;
4075
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004076 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4077 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004078 if (status) {
4079 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004080 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004081 }
4082
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004083 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004084
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004085be_fw_exit:
4086 return status;
4087}
4088
4089int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4090{
4091 const struct firmware *fw;
4092 int status;
4093
4094 if (!netif_running(adapter->netdev)) {
4095 dev_err(&adapter->pdev->dev,
4096 "Firmware load not allowed (interface is down)\n");
4097 return -1;
4098 }
4099
4100 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4101 if (status)
4102 goto fw_exit;
4103
4104 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4105
4106 if (lancer_chip(adapter))
4107 status = lancer_fw_download(adapter, fw);
4108 else
4109 status = be_fw_download(adapter, fw);
4110
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004111 if (!status)
4112 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4113 adapter->fw_on_flash);
4114
Ajit Khaparde84517482009-09-04 03:12:16 +00004115fw_exit:
4116 release_firmware(fw);
4117 return status;
4118}
4119
Sathya Perla748b5392014-05-09 13:29:13 +05304120static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004121{
4122 struct be_adapter *adapter = netdev_priv(dev);
4123 struct nlattr *attr, *br_spec;
4124 int rem;
4125 int status = 0;
4126 u16 mode = 0;
4127
4128 if (!sriov_enabled(adapter))
4129 return -EOPNOTSUPP;
4130
4131 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4132
4133 nla_for_each_nested(attr, br_spec, rem) {
4134 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4135 continue;
4136
4137 mode = nla_get_u16(attr);
4138 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4139 return -EINVAL;
4140
4141 status = be_cmd_set_hsw_config(adapter, 0, 0,
4142 adapter->if_handle,
4143 mode == BRIDGE_MODE_VEPA ?
4144 PORT_FWD_TYPE_VEPA :
4145 PORT_FWD_TYPE_VEB);
4146 if (status)
4147 goto err;
4148
4149 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4150 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4151
4152 return status;
4153 }
4154err:
4155 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4156 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4157
4158 return status;
4159}
4160
4161static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304162 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004163{
4164 struct be_adapter *adapter = netdev_priv(dev);
4165 int status = 0;
4166 u8 hsw_mode;
4167
4168 if (!sriov_enabled(adapter))
4169 return 0;
4170
4171 /* BE and Lancer chips support VEB mode only */
4172 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4173 hsw_mode = PORT_FWD_TYPE_VEB;
4174 } else {
4175 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4176 adapter->if_handle, &hsw_mode);
4177 if (status)
4178 return 0;
4179 }
4180
4181 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4182 hsw_mode == PORT_FWD_TYPE_VEPA ?
4183 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4184}
4185
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304186#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304187static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4188 __be16 port)
4189{
4190 struct be_adapter *adapter = netdev_priv(netdev);
4191 struct device *dev = &adapter->pdev->dev;
4192 int status;
4193
4194 if (lancer_chip(adapter) || BEx_chip(adapter))
4195 return;
4196
4197 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4198 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4199 be16_to_cpu(port));
4200 dev_info(dev,
4201 "Only one UDP port supported for VxLAN offloads\n");
4202 return;
4203 }
4204
4205 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4206 OP_CONVERT_NORMAL_TO_TUNNEL);
4207 if (status) {
4208 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4209 goto err;
4210 }
4211
4212 status = be_cmd_set_vxlan_port(adapter, port);
4213 if (status) {
4214 dev_warn(dev, "Failed to add VxLAN port\n");
4215 goto err;
4216 }
4217 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4218 adapter->vxlan_port = port;
4219
4220 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4221 be16_to_cpu(port));
4222 return;
4223err:
4224 be_disable_vxlan_offloads(adapter);
4225 return;
4226}
4227
4228static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4229 __be16 port)
4230{
4231 struct be_adapter *adapter = netdev_priv(netdev);
4232
4233 if (lancer_chip(adapter) || BEx_chip(adapter))
4234 return;
4235
4236 if (adapter->vxlan_port != port)
4237 return;
4238
4239 be_disable_vxlan_offloads(adapter);
4240
4241 dev_info(&adapter->pdev->dev,
4242 "Disabled VxLAN offloads for UDP port %d\n",
4243 be16_to_cpu(port));
4244}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304245#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304246
stephen hemmingere5686ad2012-01-05 19:10:25 +00004247static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004248 .ndo_open = be_open,
4249 .ndo_stop = be_close,
4250 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004251 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004252 .ndo_set_mac_address = be_mac_addr_set,
4253 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004254 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004255 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004256 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4257 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004258 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004259 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00004260 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004261 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304262 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004263#ifdef CONFIG_NET_POLL_CONTROLLER
4264 .ndo_poll_controller = be_netpoll,
4265#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004266 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4267 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304268#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304269 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304270#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304271#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304272 .ndo_add_vxlan_port = be_add_vxlan_port,
4273 .ndo_del_vxlan_port = be_del_vxlan_port,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304274#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004275};
4276
4277static void be_netdev_init(struct net_device *netdev)
4278{
4279 struct be_adapter *adapter = netdev_priv(netdev);
4280
Sathya Perlac9c47142014-03-27 10:46:19 +05304281 if (skyhawk_chip(adapter)) {
4282 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4283 NETIF_F_TSO | NETIF_F_TSO6 |
4284 NETIF_F_GSO_UDP_TUNNEL;
4285 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4286 }
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004287 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004288 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004289 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004290 if (be_multi_rxq(adapter))
4291 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004292
4293 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004294 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004295
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004296 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004297 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004298
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004299 netdev->priv_flags |= IFF_UNICAST_FLT;
4300
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004301 netdev->flags |= IFF_MULTICAST;
4302
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004303 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004304
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004305 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004306
4307 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004308}
4309
4310static void be_unmap_pci_bars(struct be_adapter *adapter)
4311{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004312 if (adapter->csr)
4313 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004314 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004315 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004316}
4317
/* Return the PCI BAR index holding the doorbell registers:
 * BAR 0 on Lancer and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}
4325
4326static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004327{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004328 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004329 adapter->roce_db.size = 4096;
4330 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4331 db_bar(adapter));
4332 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4333 db_bar(adapter));
4334 }
Parav Pandit045508a2012-03-26 14:27:13 +00004335 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004336}
4337
4338static int be_map_pci_bars(struct be_adapter *adapter)
4339{
4340 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004341
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004342 if (BEx_chip(adapter) && be_physfn(adapter)) {
4343 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4344 if (adapter->csr == NULL)
4345 return -ENOMEM;
4346 }
4347
Sathya Perlace66f782012-11-06 17:48:58 +00004348 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004349 if (addr == NULL)
4350 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004351 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004352
4353 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004354 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004355
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004356pci_map_err:
4357 be_unmap_pci_bars(adapter);
4358 return -ENOMEM;
4359}
4360
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004361static void be_ctrl_cleanup(struct be_adapter *adapter)
4362{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004363 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004364
4365 be_unmap_pci_bars(adapter);
4366
4367 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004368 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4369 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004370
Sathya Perla5b8821b2011-08-02 19:57:44 +00004371 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004372 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004373 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4374 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004375}
4376
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004377static int be_ctrl_init(struct be_adapter *adapter)
4378{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004379 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4380 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004381 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004382 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004383 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004384
Sathya Perlace66f782012-11-06 17:48:58 +00004385 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4386 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4387 SLI_INTF_FAMILY_SHIFT;
4388 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4389
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004390 status = be_map_pci_bars(adapter);
4391 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004392 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004393
4394 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004395 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4396 mbox_mem_alloc->size,
4397 &mbox_mem_alloc->dma,
4398 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004399 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004400 status = -ENOMEM;
4401 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004402 }
4403 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4404 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4405 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4406 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004407
Sathya Perla5b8821b2011-08-02 19:57:44 +00004408 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa82013-08-26 22:45:23 -07004409 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4410 rx_filter->size, &rx_filter->dma,
4411 GFP_KERNEL);
Sathya Perla5b8821b2011-08-02 19:57:44 +00004412 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004413 status = -ENOMEM;
4414 goto free_mbox;
4415 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004416
Ivan Vecera29849612010-12-14 05:43:19 +00004417 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004418 spin_lock_init(&adapter->mcc_lock);
4419 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004420
Suresh Reddy5eeff632014-01-06 13:02:24 +05304421 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004422 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004423 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004424
4425free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004426 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4427 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004428
4429unmap_pci_bars:
4430 be_unmap_pci_bars(adapter);
4431
4432done:
4433 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004434}
4435
4436static void be_stats_cleanup(struct be_adapter *adapter)
4437{
Sathya Perla3abcded2010-10-03 22:12:27 -07004438 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004439
4440 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004441 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4442 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004443}
4444
4445static int be_stats_init(struct be_adapter *adapter)
4446{
Sathya Perla3abcded2010-10-03 22:12:27 -07004447 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004448
Sathya Perlaca34fe32012-11-06 17:48:56 +00004449 if (lancer_chip(adapter))
4450 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4451 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004452 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004453 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004454 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004455 else
4456 /* ALL non-BE ASICs */
4457 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004458
Joe Perchesede23fa82013-08-26 22:45:23 -07004459 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4460 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004461 if (cmd->va == NULL)
4462 return -1;
4463 return 0;
4464}
4465
Bill Pemberton3bc6b062012-12-03 09:23:09 -05004466static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004467{
4468 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004469
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004470 if (!adapter)
4471 return;
4472
Parav Pandit045508a2012-03-26 14:27:13 +00004473 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004474 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00004475
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004476 cancel_delayed_work_sync(&adapter->func_recovery_work);
4477
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004478 unregister_netdev(adapter->netdev);
4479
Sathya Perla5fb379e2009-06-18 00:02:59 +00004480 be_clear(adapter);
4481
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004482 /* tell fw we're done with firing cmds */
4483 be_cmd_fw_clean(adapter);
4484
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004485 be_stats_cleanup(adapter);
4486
4487 be_ctrl_cleanup(adapter);
4488
Sathya Perlad6b6d982012-09-05 01:56:48 +00004489 pci_disable_pcie_error_reporting(pdev);
4490
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004491 pci_release_regions(pdev);
4492 pci_disable_device(pdev);
4493
4494 free_netdev(adapter->netdev);
4495}
4496
Sathya Perla39f1d942012-05-08 19:41:24 +00004497static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004498{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304499 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004500
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004501 status = be_cmd_get_cntl_attributes(adapter);
4502 if (status)
4503 return status;
4504
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004505 /* Must be a power of 2 or else MODULO will BUG_ON */
4506 adapter->be_get_temp_freq = 64;
4507
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304508 if (BEx_chip(adapter)) {
4509 level = be_cmd_get_fw_log_level(adapter);
4510 adapter->msg_enable =
4511 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4512 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004513
Sathya Perla92bf14a2013-08-27 16:57:32 +05304514 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004515 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004516}
4517
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004518static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004519{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004520 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004521 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004522
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004523 status = lancer_test_and_set_rdy_state(adapter);
4524 if (status)
4525 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004526
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004527 if (netif_running(adapter->netdev))
4528 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004529
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004530 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004531
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004532 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004533
4534 status = be_setup(adapter);
4535 if (status)
4536 goto err;
4537
4538 if (netif_running(adapter->netdev)) {
4539 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004540 if (status)
4541 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004542 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004543
Somnath Kotur4bebb562013-12-05 12:07:55 +05304544 dev_err(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004545 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004546err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004547 if (status == -EAGAIN)
4548 dev_err(dev, "Waiting for resource provisioning\n");
4549 else
Somnath Kotur4bebb562013-12-05 12:07:55 +05304550 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004551
4552 return status;
4553}
4554
4555static void be_func_recovery_task(struct work_struct *work)
4556{
4557 struct be_adapter *adapter =
4558 container_of(work, struct be_adapter, func_recovery_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004559 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004560
4561 be_detect_error(adapter);
4562
4563 if (adapter->hw_error && lancer_chip(adapter)) {
4564
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004565 rtnl_lock();
4566 netif_device_detach(adapter->netdev);
4567 rtnl_unlock();
4568
4569 status = lancer_recover_func(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004570 if (!status)
4571 netif_device_attach(adapter->netdev);
4572 }
4573
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004574 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4575 * no need to attempt further recovery.
4576 */
4577 if (!status || status == -EAGAIN)
4578 schedule_delayed_work(&adapter->func_recovery_work,
4579 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004580}
4581
4582static void be_worker(struct work_struct *work)
4583{
4584 struct be_adapter *adapter =
4585 container_of(work, struct be_adapter, work.work);
4586 struct be_rx_obj *rxo;
4587 int i;
4588
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004589 /* when interrupts are not yet enabled, just reap any pending
4590 * mcc completions */
4591 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004592 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004593 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004594 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004595 goto reschedule;
4596 }
4597
4598 if (!adapter->stats_cmd_sent) {
4599 if (lancer_chip(adapter))
4600 lancer_cmd_get_pport_stats(adapter,
4601 &adapter->stats_cmd);
4602 else
4603 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4604 }
4605
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05304606 if (be_physfn(adapter) &&
4607 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004608 be_cmd_get_die_temperature(adapter);
4609
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004610 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05304611 /* Replenish RX-queues starved due to memory
4612 * allocation failures.
4613 */
4614 if (rxo->rx_post_starved)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004615 be_post_rx_frags(rxo, GFP_KERNEL);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004616 }
4617
Sathya Perla2632baf2013-10-01 16:00:00 +05304618 be_eqd_update(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004619
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004620reschedule:
4621 adapter->work_counter++;
4622 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4623}
4624
Sathya Perla257a3fe2013-06-14 15:54:51 +05304625/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004626static bool be_reset_required(struct be_adapter *adapter)
4627{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304628 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004629}
4630
Sathya Perlad3791422012-09-28 04:39:44 +00004631static char *mc_name(struct be_adapter *adapter)
4632{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304633 char *str = ""; /* default */
4634
4635 switch (adapter->mc_type) {
4636 case UMC:
4637 str = "UMC";
4638 break;
4639 case FLEX10:
4640 str = "FLEX10";
4641 break;
4642 case vNIC1:
4643 str = "vNIC-1";
4644 break;
4645 case nPAR:
4646 str = "nPAR";
4647 break;
4648 case UFP:
4649 str = "UFP";
4650 break;
4651 case vNIC2:
4652 str = "vNIC-2";
4653 break;
4654 default:
4655 str = "";
4656 }
4657
4658 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004659}
4660
/* Return "PF" or "VF" depending on the PCI function type */
static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
4665
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004666static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004667{
4668 int status = 0;
4669 struct be_adapter *adapter;
4670 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004671 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004672
4673 status = pci_enable_device(pdev);
4674 if (status)
4675 goto do_none;
4676
4677 status = pci_request_regions(pdev, DRV_NAME);
4678 if (status)
4679 goto disable_dev;
4680 pci_set_master(pdev);
4681
Sathya Perla7f640062012-06-05 19:37:20 +00004682 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004683 if (netdev == NULL) {
4684 status = -ENOMEM;
4685 goto rel_reg;
4686 }
4687 adapter = netdev_priv(netdev);
4688 adapter->pdev = pdev;
4689 pci_set_drvdata(pdev, adapter);
4690 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004691 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004692
Russell King4c15c242013-06-26 23:49:11 +01004693 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004694 if (!status) {
4695 netdev->features |= NETIF_F_HIGHDMA;
4696 } else {
Russell King4c15c242013-06-26 23:49:11 +01004697 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004698 if (status) {
4699 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4700 goto free_netdev;
4701 }
4702 }
4703
Ajit Khapardeea58c182013-10-18 16:06:24 -05004704 if (be_physfn(adapter)) {
4705 status = pci_enable_pcie_error_reporting(pdev);
4706 if (!status)
4707 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4708 }
Sathya Perlad6b6d982012-09-05 01:56:48 +00004709
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004710 status = be_ctrl_init(adapter);
4711 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004712 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004713
Sathya Perla2243e2e2009-11-22 22:02:03 +00004714 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004715 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004716 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004717 if (status)
4718 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004719 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004720
Sathya Perla39f1d942012-05-08 19:41:24 +00004721 if (be_reset_required(adapter)) {
4722 status = be_cmd_reset_function(adapter);
4723 if (status)
4724 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004725
Kalesh AP2d177be2013-04-28 22:22:29 +00004726 /* Wait for interrupts to quiesce after an FLR */
4727 msleep(100);
4728 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004729
4730 /* Allow interrupts for other ULPs running on NIC function */
4731 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004732
Kalesh AP2d177be2013-04-28 22:22:29 +00004733 /* tell fw we're ready to fire cmds */
4734 status = be_cmd_fw_init(adapter);
4735 if (status)
4736 goto ctrl_clean;
4737
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004738 status = be_stats_init(adapter);
4739 if (status)
4740 goto ctrl_clean;
4741
Sathya Perla39f1d942012-05-08 19:41:24 +00004742 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004743 if (status)
4744 goto stats_clean;
4745
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004746 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004747 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004748 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004749
Sathya Perla5fb379e2009-06-18 00:02:59 +00004750 status = be_setup(adapter);
4751 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004752 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004753
Sathya Perla3abcded2010-10-03 22:12:27 -07004754 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004755 status = register_netdev(netdev);
4756 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004757 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004758
Parav Pandit045508a2012-03-26 14:27:13 +00004759 be_roce_dev_add(adapter);
4760
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004761 schedule_delayed_work(&adapter->func_recovery_work,
4762 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004763
4764 be_cmd_query_port_name(adapter, &port_name);
4765
Sathya Perlad3791422012-09-28 04:39:44 +00004766 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4767 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004768
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004769 return 0;
4770
Sathya Perla5fb379e2009-06-18 00:02:59 +00004771unsetup:
4772 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004773stats_clean:
4774 be_stats_cleanup(adapter);
4775ctrl_clean:
4776 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004777free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004778 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004779rel_reg:
4780 pci_release_regions(pdev);
4781disable_dev:
4782 pci_disable_device(pdev);
4783do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004784 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004785 return status;
4786}
4787
/* Legacy PM suspend handler: quiesce the function and put the device
 * into the requested low-power state.  Teardown order (WoL arm, IRQ
 * disable, recovery-work cancel, detach/close, be_clear) mirrors the
 * bringup order in be_resume().  Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Arm Wake-on-LAN before powering down, if the user enabled it */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under RTNL */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4812
/* Legacy PM resume handler: re-enable the PCI device, wait for FW,
 * re-run setup and reattach the netdev.  Reverses be_suspend().
 * Returns 0 on success or a negative errno.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* FW may itself be coming out of reset; wait until it is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is not checked here,
	 * unlike in be_probe()/be_eeh_resume()
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		/* be_open() must run under RTNL */
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	/* Disarm WoL now that we are fully powered again */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4854
Sathya Perla82456b02010-02-17 01:35:37 +00004855/*
4856 * An FLR will stop BE from DMAing any data.
4857 */
4858static void be_shutdown(struct pci_dev *pdev)
4859{
4860 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004861
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004862 if (!adapter)
4863 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004864
Sathya Perla0f4a6822011-03-21 20:49:28 +00004865 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004866 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004867
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004868 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004869
Ajit Khaparde57841862011-04-06 18:08:43 +00004870 be_cmd_reset_function(adapter);
4871
Sathya Perla82456b02010-02-17 01:35:37 +00004872 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004873}
4874
Sathya Perlacf588472010-02-14 21:22:01 +00004875static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05304876 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00004877{
4878 struct be_adapter *adapter = pci_get_drvdata(pdev);
4879 struct net_device *netdev = adapter->netdev;
4880
4881 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4882
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004883 if (!adapter->eeh_error) {
4884 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004885
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004886 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004887
Sathya Perlacf588472010-02-14 21:22:01 +00004888 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004889 netif_device_detach(netdev);
4890 if (netif_running(netdev))
4891 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004892 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004893
4894 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004895 }
Sathya Perlacf588472010-02-14 21:22:01 +00004896
4897 if (state == pci_channel_io_perm_failure)
4898 return PCI_ERS_RESULT_DISCONNECT;
4899
4900 pci_disable_device(pdev);
4901
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004902 /* The error could cause the FW to trigger a flash debug dump.
4903 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004904 * can cause it not to recover; wait for it to finish.
4905 * Wait only for first function as it is needed only once per
4906 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004907 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004908 if (pdev->devfn == 0)
4909 ssleep(30);
4910
Sathya Perlacf588472010-02-14 21:22:01 +00004911 return PCI_ERS_RESULT_NEED_RESET;
4912}
4913
/* EEH/AER slot_reset callback: re-enable the device after a slot reset
 * and verify the FW comes back.  Returns RECOVERED on success, else
 * DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear the AER status so stale errors don't re-trigger recovery */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4940
/* EEH/AER resume callback: re-initialize FW state, redo setup and
 * reattach the netdev after a successful slot reset.  On any failure
 * the device is left detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic function-recovery polling */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4977
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004978static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004979 .error_detected = be_eeh_err_detected,
4980 .slot_reset = be_eeh_reset,
4981 .resume = be_eeh_resume,
4982};
4983
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004984static struct pci_driver be_driver = {
4985 .name = DRV_NAME,
4986 .id_table = be_dev_ids,
4987 .probe = be_probe,
4988 .remove = be_remove,
4989 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004990 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004991 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004992 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004993};
4994
4995static int __init be_init_module(void)
4996{
Joe Perches8e95a202009-12-03 07:58:21 +00004997 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4998 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004999 printk(KERN_WARNING DRV_NAME
5000 " : Module param rx_frag_size must be 2048/4096/8192."
5001 " Using 2048\n");
5002 rx_frag_size = 2048;
5003 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005004
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005005 return pci_register_driver(&be_driver);
5006}
5007module_init(be_init_module);
5008
/* Module exit point: unregister the PCI driver (triggers be_remove
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
5013module_exit(be_exit_module);