blob: e1d445dd856413d2371620bd10444c29b2bed64f [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Sathya Perla6b7c5b92009-03-11 23:32:03 -070042static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: bit-position -> HW block name, used when decoding
 * the low Unrecoverable Error status register for log messages.
 * NOTE(review): trailing spaces in some strings appear intentional
 * (kept byte-for-byte) -- confirm against the UE log formatting code.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: bit-position -> HW block name for the high
 * Unrecoverable Error status register.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* ndo_set_mac_address handler.
 *
 * Attempts to program the new MAC via FW (PMAC_ADD), deletes the old
 * PMAC entry on success, then queries the FW for the currently active
 * MAC to decide whether the change actually took effect. On VFs the
 * PMAC_ADD may legitimately fail (privilege / PF pre-provisioning), so
 * only the final FW query determines success.
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * if the FW did not activate the new MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0 (BE2) FW stats layout into the driver's stats cache. */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats little-endian; convert the buffer in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address-filtered and vlan-filtered drops separately;
	 * the driver exposes their sum
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters live in the rxf block, one per physical port */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) FW stats layout into the driver's stats cache. */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats little-endian; convert the buffer in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single pre-combined filtered-drop count */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 (post-BE3/Skyhawk-class) FW stats layout into the driver's
 * stats cache; additionally picks up RoCE counters when supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats little-endian; convert the buffer in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		/* RoCE counters are only meaningful on RoCE-capable HW */
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer pport stats layout into the driver's stats cache. */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* FW returns stats little-endian; convert the buffer in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filtered drops separately;
	 * the driver exposes their sum
	 */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
/* ndo_get_stats64 handler: fill @stats from the per-queue SW counters
 * (read under u64_stats seq protection) plus the FW-derived drv_stats
 * cache. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read until a consistent 64-bit snapshot */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
/* Account one transmit request in the per-TX-queue stats.
 * @wrb_cnt:  number of work-request blocks consumed (incl. hdr/dummy)
 * @copied:   bytes posted to the queue
 * @gso_segs: segment count for GSO skbs; 0 means a single packet
 * @stopped:  true if this xmit caused the queue to be stopped
 * Updates are bracketed by u64_stats sync so 64-bit readers on 32-bit
 * platforms see a consistent snapshot.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a non-GSO skb counts as one packet */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Program the TX header WRB that precedes the fragment WRBs.
 * @wrb_cnt:      total WRBs for this request (incl. this hdr and any dummy)
 * @len:          total payload bytes copied into the fragment WRBs
 * @skip_hw_vlan: when true, ask HW to skip VLAN insertion (evt=1, compl=0)
 * Sets LSO, checksum-offload and VLAN bits based on the skb's offload
 * state. Fields are written via AMAP_SET_BITS into the hdr bit-map.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not used on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunneled pkt: also request inner IP csum and key
			 * the L4 csum bits off the inner header's protocol
			 */
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* Map an skb for DMA and post its WRBs (header, linear head, one per
 * page fragment, optional dummy) onto the TX queue.
 * Returns the number of payload bytes posted, or 0 on a DMA mapping
 * failure, in which case the queue head is rewound and all mappings
 * made so far are undone.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB slot; it is filled in last, once the
	 * total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* remember where the fragment WRBs start, for error rewind */
	map_head = txq->head;

	/* linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		/* only the first mapping is a dma_map_single(); used by
		 * the unwind path to pick the matching unmap call
		 */
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length dummy WRB to make the count even (see
	 * wrb_cnt_for_skb())
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first fragment WRB and unmap everything that
	 * was mapped; only the first unwound WRB (the linear head, if
	 * present) was mapped with dma_map_single()
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert VLAN tag(s) into the packet payload in software, instead of
 * relying on HW tagging (used by the HW/FW workaround paths).
 * Inserts the skb's own tag (or the pvid, in the QnQ case) as the
 * inner tag, then the qnq_vid as the outer tag if configured.
 * Returns the (possibly reallocated) skb, or NULL if tagging failed;
 * *skip_hw_vlan is set when the FW must be told to skip HW insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* the skb payload is modified below; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the payload; clear the out-of-band one */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply the BEx/Lancer TX workarounds that rewrite the skb: trim the
 * padding off short IPv4 packets, insert VLAN tags in software where
 * HW tagging is buggy, and drop packets that would stall the HW.
 * Returns the (possibly reallocated) skb, or NULL if the packet was
 * dropped or tagging failed; *skip_hw_vlan is set when the FW must be
 * told to skip HW VLAN insertion.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the skb to the IP datagram length.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply TX workarounds, post the skb's WRBs to
 * the queue selected by the skb's queue mapping, stop the subqueue if
 * it is about to fill, and ring the doorbell.
 * Always returns NETDEV_TX_OK; dropped skbs are freed here and counted
 * in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	/* remember the head so a failed post can be rewound */
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround path already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more VLANs configured than the HW filter supports */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled; turn the VLAN
			 * promiscuous RX filter back off
			 */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promiscuous mode; nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1146
Patrick McHardy80d5c362013-04-19 02:04:28 +00001147static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148{
1149 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001150 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001152 /* Packets with VID 0 are always received by Lancer by default */
1153 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301154 return status;
1155
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301156 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301157 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001158
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301159 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301160 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001161
Somnath Kotura6b74e02014-01-21 15:50:55 +05301162 status = be_vid_config(adapter);
1163 if (status) {
1164 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301165 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301166 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301167
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001168 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169}
1170
Patrick McHardy80d5c362013-04-19 02:04:28 +00001171static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172{
1173 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001174 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001175
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
1178 goto ret;
1179
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301180 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301181 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001182 if (!status)
1183 adapter->vlans_added--;
1184 else
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301185 set_bit(vid, adapter->vids);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001186ret:
1187 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001188}
1189
/* Leave promiscuous mode: clear the driver-side promisc state (incl.
 * the VLAN-promisc flag, which IFF_PROMISC had made redundant) and
 * tell the FW to turn the promiscuous RX filter off.
 */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
1197
/* ndo_set_rx_mode handler: program the FW RX filters to match the
 * netdev's flags and unicast/multicast address lists. Falls back to
 * promiscuous (or all-multicast) filtering when the configured
 * addresses exceed what the HW filters can hold.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN table, which promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* unicast list changed: delete all secondary MACs from the FW,
	 * then re-add the current list
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many unicast MACs for the HW filter: go promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev,
			 "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev,
			 "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1259
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001260static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1261{
1262 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001264 int status;
1265
Sathya Perla11ac75e2011-12-13 00:58:50 +00001266 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001267 return -EPERM;
1268
Sathya Perla11ac75e2011-12-13 00:58:50 +00001269 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001270 return -EINVAL;
1271
Sathya Perla3175d8c2013-07-23 15:25:03 +05301272 if (BEx_chip(adapter)) {
1273 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1274 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001275
Sathya Perla11ac75e2011-12-13 00:58:50 +00001276 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1277 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301278 } else {
1279 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1280 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001281 }
1282
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001283 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001284 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301285 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001286 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001287 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001288
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001289 return status;
1290}
1291
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001292static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301293 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001294{
1295 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001296 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001297
Sathya Perla11ac75e2011-12-13 00:58:50 +00001298 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001299 return -EPERM;
1300
Sathya Perla11ac75e2011-12-13 00:58:50 +00001301 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001302 return -EINVAL;
1303
1304 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001305 vi->max_tx_rate = vf_cfg->tx_rate;
1306 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001307 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1308 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001309 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301310 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001311
1312 return 0;
1313}
1314
Sathya Perla748b5392014-05-09 13:29:13 +05301315static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001316{
1317 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001318 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001319 int status = 0;
1320
Sathya Perla11ac75e2011-12-13 00:58:50 +00001321 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001322 return -EPERM;
1323
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001324 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001325 return -EINVAL;
1326
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001327 if (vlan || qos) {
1328 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301329 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001330 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1331 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001332 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001333 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301334 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1335 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001336 }
1337
Somnath Koturc5022242014-03-03 14:24:20 +05301338 if (!status)
1339 vf_cfg->vlan_tag = vlan;
1340 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001341 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301342 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001343 return status;
1344}
1345
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001346static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1347 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001348{
1349 struct be_adapter *adapter = netdev_priv(netdev);
1350 int status = 0;
1351
Sathya Perla11ac75e2011-12-13 00:58:50 +00001352 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001353 return -EPERM;
1354
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001355 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001356 return -EINVAL;
1357
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001358 if (min_tx_rate)
1359 return -EINVAL;
1360
1361 if (max_tx_rate < 100 || max_tx_rate > 10000) {
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001362 dev_err(&adapter->pdev->dev,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001363 "max tx rate must be between 100 and 10000 Mbps\n");
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001364 return -EINVAL;
1365 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001366
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001367 status = be_cmd_config_qos(adapter, max_tx_rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001368 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001369 dev_err(&adapter->pdev->dev,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001370 "max tx rate %d on VF %d failed\n", max_tx_rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001371 else
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001372 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001373 return status;
1374}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301375static int be_set_vf_link_state(struct net_device *netdev, int vf,
1376 int link_state)
1377{
1378 struct be_adapter *adapter = netdev_priv(netdev);
1379 int status;
1380
1381 if (!sriov_enabled(adapter))
1382 return -EPERM;
1383
1384 if (vf >= adapter->num_vfs)
1385 return -EINVAL;
1386
1387 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1388 if (!status)
1389 adapter->vf_cfg[vf].plink_tracking = link_state;
1390
1391 return status;
1392}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001393
Sathya Perla2632baf2013-10-01 16:00:00 +05301394static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1395 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396{
Sathya Perla2632baf2013-10-01 16:00:00 +05301397 aic->rx_pkts_prev = rx_pkts;
1398 aic->tx_reqs_prev = tx_pkts;
1399 aic->jiffies = now;
1400}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001401
Sathya Perla2632baf2013-10-01 16:00:00 +05301402static void be_eqd_update(struct be_adapter *adapter)
1403{
1404 struct be_set_eqd set_eqd[MAX_EVT_QS];
1405 int eqd, i, num = 0, start;
1406 struct be_aic_obj *aic;
1407 struct be_eq_obj *eqo;
1408 struct be_rx_obj *rxo;
1409 struct be_tx_obj *txo;
1410 u64 rx_pkts, tx_pkts;
1411 ulong now;
1412 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001413
Sathya Perla2632baf2013-10-01 16:00:00 +05301414 for_all_evt_queues(adapter, eqo, i) {
1415 aic = &adapter->aic_obj[eqo->idx];
1416 if (!aic->enable) {
1417 if (aic->jiffies)
1418 aic->jiffies = 0;
1419 eqd = aic->et_eqd;
1420 goto modify_eqd;
1421 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422
Sathya Perla2632baf2013-10-01 16:00:00 +05301423 rxo = &adapter->rx_obj[eqo->idx];
1424 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001425 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301426 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001427 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001428
Sathya Perla2632baf2013-10-01 16:00:00 +05301429 txo = &adapter->tx_obj[eqo->idx];
1430 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001431 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301432 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001433 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001434
Sathya Perla4097f662009-03-24 16:40:13 -07001435
Sathya Perla2632baf2013-10-01 16:00:00 +05301436 /* Skip, if wrapped around or first calculation */
1437 now = jiffies;
1438 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1439 rx_pkts < aic->rx_pkts_prev ||
1440 tx_pkts < aic->tx_reqs_prev) {
1441 be_aic_update(aic, rx_pkts, tx_pkts, now);
1442 continue;
1443 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001444
Sathya Perla2632baf2013-10-01 16:00:00 +05301445 delta = jiffies_to_msecs(now - aic->jiffies);
1446 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1447 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1448 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001449
Sathya Perla2632baf2013-10-01 16:00:00 +05301450 if (eqd < 8)
1451 eqd = 0;
1452 eqd = min_t(u32, eqd, aic->max_eqd);
1453 eqd = max_t(u32, eqd, aic->min_eqd);
1454
1455 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001456modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301457 if (eqd != aic->prev_eqd) {
1458 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1459 set_eqd[num].eq_id = eqo->q.id;
1460 aic->prev_eqd = eqd;
1461 num++;
1462 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001463 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301464
1465 if (num)
1466 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001467}
1468
Sathya Perla3abcded2010-10-03 22:12:27 -07001469static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301470 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001471{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001472 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001473
Sathya Perlaab1594e2011-07-25 19:10:15 +00001474 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001475 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001476 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001477 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001478 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001479 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001480 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001481 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001482 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483}
1484
Sathya Perla2e588f82011-03-11 02:49:26 +00001485static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001486{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001487 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301488 * Also ignore ipcksm for ipv6 pkts
1489 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001490 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301491 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001492}
1493
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301494static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001496 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001498 struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301499 u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500
Sathya Perla3abcded2010-10-03 22:12:27 -07001501 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001502 BUG_ON(!rx_page_info->page);
1503
Sathya Perlae50287b2014-03-04 12:14:38 +05301504 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001505 dma_unmap_page(&adapter->pdev->dev,
1506 dma_unmap_addr(rx_page_info, bus),
1507 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05301508 rx_page_info->last_frag = false;
1509 } else {
1510 dma_sync_single_for_cpu(&adapter->pdev->dev,
1511 dma_unmap_addr(rx_page_info, bus),
1512 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001513 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001514
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301515 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 atomic_dec(&rxq->used);
1517 return rx_page_info;
1518}
1519
1520/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001521static void be_rx_compl_discard(struct be_rx_obj *rxo,
1522 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001525 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001527 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301528 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001529 put_page(page_info->page);
1530 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001531 }
1532}
1533
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * The first descriptor's header bytes (or a whole tiny packet) are
 * copied into the skb linear area; everything else is attached as page
 * frags, coalescing consecutive frags from the same physical page into
 * one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area; the
		 * remainder of the first fragment becomes frag[0]
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ref now owned by the skb (or released) */

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1608
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001609/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301610static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001611 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001613 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001614 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001615 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001616
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001617 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001618 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001619 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001620 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001621 return;
1622 }
1623
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001624 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001625
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001626 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001627 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001628 else
1629 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001631 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001632 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001633 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001634 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301635
1636 skb->encapsulation = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301637 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001638
Jiri Pirko343e43c2011-08-25 02:50:51 +00001639 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001640 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001641
1642 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001643}
1644
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach all received fragments to napi's frag-skb (coalescing frags
 * from the same physical page) and feed it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb from napi: drop the frame, reclaim its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j indexes the current skb frag slot; -1 means none started yet */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* NOTE(review): csum marked good unconditionally here — presumably
	 * the caller takes the GRO path only for csum-verified pkts; confirm
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1702
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001703static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1704 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001705{
Sathya Perla2e588f82011-03-11 02:49:26 +00001706 rxcp->pkt_size =
1707 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1708 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1709 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1710 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001711 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001712 rxcp->ip_csum =
1713 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1714 rxcp->l4_csum =
1715 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1716 rxcp->ipv6 =
1717 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001718 rxcp->num_rcvd =
1719 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1720 rxcp->pkt_type =
1721 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001722 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001723 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001724 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301725 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
David S. Miller3c709f82011-05-11 14:26:15 -04001726 compl);
Sathya Perla748b5392014-05-09 13:29:13 +05301727 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1728 vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001729 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001730 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301731 rxcp->tunneled =
1732 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001733}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001735static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1736 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001737{
1738 rxcp->pkt_size =
1739 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1740 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1741 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1742 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001743 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001744 rxcp->ip_csum =
1745 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1746 rxcp->l4_csum =
1747 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1748 rxcp->ipv6 =
1749 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001750 rxcp->num_rcvd =
1751 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1752 rxcp->pkt_type =
1753 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001754 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001755 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001756 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301757 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
David S. Miller3c709f82011-05-11 14:26:15 -04001758 compl);
Sathya Perla748b5392014-05-09 13:29:13 +05301759 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1760 vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001761 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001762 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001763 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1764 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001765}
1766
/* Fetch and parse the Rx completion at the CQ tail.
 * Returns NULL when no valid completion is pending; otherwise decodes
 * the entry into the per-rxo scratch rxcp, invalidates the CQ entry for
 * the next pass around the ring, advances the tail and returns rxcp.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the compl body until after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 native mode uses the v1 layout; everything else uses v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The l4 csum verdict is not usable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Tag needs byte-swapping except on Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless that vlan was
		 * explicitly configured on the interface (vids bitmap)
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1811
Eric Dumazet1829b082011-03-01 05:48:12 +00001812static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001813{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001815
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001816 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001817 gfp |= __GFP_COMP;
1818 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819}
1820
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Fill RXQ slots starting at the head until either MAX_RX_POST
	 * descriptors are posted or a slot that still has a page is hit
	 * (i.e. the ring is full).
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big" page and DMA-map it once; it is
			 * carved into rx_frag_size'd fragments below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next fragment of the same big page: take an extra
			 * page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Program the RX descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			/* Last fragment of this page: record the page's base
			 * DMA address so the whole page can be unmapped later.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell the HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1899
/* Fetch the next valid TX completion entry from @tx_cq, or NULL if none.
 * Converts the entry to CPU endianness, invalidates it and advances the
 * CQ tail so it is not processed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the 'valid' bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the valid bit: the entry has been consumed */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1915
/* Reclaim the WRBs of one transmitted skb: walk the TXQ from its tail up to
 * @last_index, DMA-unmapping each fragment, then free the skb.
 * Returns the number of WRBs consumed (including the header WRB) so the
 * caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb's linear header only for the first WRB */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* Safe from any context (hard-irq included) */
	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1947
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume EQ entries until an unwritten (evt == 0) one is found */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Don't clear the entry before the evt field has been read */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1967
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001968/* Leaves the EQ is disarmed state */
1969static void be_eq_clean(struct be_eq_obj *eqo)
1970{
1971 int num = events_get(eqo);
1972
1973 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1974}
1975
/* Drain an RX completion queue during teardown and release every RX buffer
 * that was posted but never consumed by the HW.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2024
/* Drain TX completions of all TX queues during teardown, then forcibly
 * reclaim any posted WRBs for which a completion will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's WRB span to find its last index */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2084
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002085static void be_evt_queues_destroy(struct be_adapter *adapter)
2086{
2087 struct be_eq_obj *eqo;
2088 int i;
2089
2090 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002091 if (eqo->q.created) {
2092 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002093 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302094 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302095 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002096 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002097 be_queue_free(adapter, &eqo->q);
2098 }
2099}
2100
/* Create the event queues: one per interrupt vector (capped by the
 * configured queue count), each with its own NAPI context and adaptive
 * interrupt coalescing (AIC) state.
 * Returns 0 on success or a negative/非-zero status from queue setup.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		/* Allocate host memory for the EQ, then create it in HW */
		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2134
Sathya Perla5fb379e2009-06-18 00:02:59 +00002135static void be_mcc_queues_destroy(struct be_adapter *adapter)
2136{
2137 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002138
Sathya Perla8788fdc2009-07-27 22:52:03 +00002139 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002140 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002141 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002142 be_queue_free(adapter, q);
2143
Sathya Perla8788fdc2009-07-27 22:52:03 +00002144 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002145 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002146 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002147 be_queue_free(adapter, q);
2148}
2149
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and WRB queue; on any failure, unwind
 * whatever was created so far via the goto chain and return -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2182
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183static void be_tx_queues_destroy(struct be_adapter *adapter)
2184{
2185 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002186 struct be_tx_obj *txo;
2187 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002188
Sathya Perla3c8def92011-06-12 20:01:58 +00002189 for_all_tx_queues(adapter, txo, i) {
2190 q = &txo->q;
2191 if (q->created)
2192 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2193 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194
Sathya Perla3c8def92011-06-12 20:01:58 +00002195 q = &txo->cq;
2196 if (q->created)
2197 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2198 be_queue_free(adapter, q);
2199 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002200}
2201
/* Create all TX queues and their completion queues; the number of TX
 * queues is bounded by the number of EQs and the HW TXQ limit.
 * Returns 0 on success or the first failing status.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2242
2243static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244{
2245 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002246 struct be_rx_obj *rxo;
2247 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002248
Sathya Perla3abcded2010-10-03 22:12:27 -07002249 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002250 q = &rxo->cq;
2251 if (q->created)
2252 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2253 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255}
2256
/* Create RX completion queues: one RSS ring per EQ, plus one default RXQ
 * for non-IP traffic when RSS is in use. Returns 0 on success.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* big_page_size is the rx_frag_size rounded up to a page multiple */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs may share EQs when there are fewer EQs than RXQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2293
/* INTx interrupt handler: schedules NAPI for the EQ and tracks spurious
 * interrupts so a persistently bad IRQ line can be reported as IRQ_NONE.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2325
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002326static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002327{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002328 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002329
Sathya Perla0b545a62012-11-23 00:27:18 +00002330 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2331 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002332 return IRQ_HANDLED;
2333}
2334
Sathya Perla2e588f82011-03-11 02:49:26 +00002335static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002336{
Somnath Koture38b1702013-05-29 22:55:56 +00002337 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002338}
2339
/* Process up to @budget RX completions on @rxo, delivering packets via GRO
 * or the regular path. @polling distinguishes NAPI from busy-polling
 * (GRO is skipped while busy-polling). Returns the number of completions
 * processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Re-arm the CQ and acknowledge processed entries */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2395
/* Reap up to @budget TX completions for @txo, reclaiming WRBs and waking
 * the netdev subqueue @idx if it was flow-stopped. Returns true when fewer
 * than @budget completions were found (i.e. the TXQ is drained).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002429
/* NAPI poll handler for one EQ: services its TX queues, its RX queues
 * (if not held by busy-poll), and MCC completions on the MCC EQ.
 * Re-arms the EQ only when all work fit within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX not fully drained: ask to be polled again */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RX rings; stay in polling mode */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* All done: re-arm the EQ and acknowledge events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2474
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll handler: reap up to 4 RX completions from the first RX ring on
 * this EQ that has work pending. Returns LL_FLUSH_BUSY if NAPI currently
 * owns the EQ, else the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int idx, rx_done = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, idx) {
		rx_done = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (rx_done)
			break;
	}

	be_unlock_busy_poll(eqo);
	return rx_done;
}
#endif
2496
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002497void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002498{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002499 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2500 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002501 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302502 bool error_detected = false;
2503 struct device *dev = &adapter->pdev->dev;
2504 struct net_device *netdev = adapter->netdev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002505
Sathya Perlad23e9462012-12-17 19:38:51 +00002506 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002507 return;
2508
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002509 if (lancer_chip(adapter)) {
2510 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2511 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2512 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302513 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002514 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302515 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302516 adapter->hw_error = true;
2517 /* Do not log error messages if its a FW reset */
2518 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2519 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2520 dev_info(dev, "Firmware update in progress\n");
2521 } else {
2522 error_detected = true;
2523 dev_err(dev, "Error detected in the card\n");
2524 dev_err(dev, "ERR: sliport status 0x%x\n",
2525 sliport_status);
2526 dev_err(dev, "ERR: sliport error1 0x%x\n",
2527 sliport_err1);
2528 dev_err(dev, "ERR: sliport error2 0x%x\n",
2529 sliport_err2);
2530 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002531 }
2532 } else {
2533 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302534 PCICFG_UE_STATUS_LOW, &ue_lo);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002535 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302536 PCICFG_UE_STATUS_HIGH, &ue_hi);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002537 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302538 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002539 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302540 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002541
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002542 ue_lo = (ue_lo & ~ue_lo_mask);
2543 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002544
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302545 /* On certain platforms BE hardware can indicate spurious UEs.
2546 * Allow HW to stop working completely in case of a real UE.
2547 * Hence not setting the hw_error for UE detection.
2548 */
2549
2550 if (ue_lo || ue_hi) {
2551 error_detected = true;
2552 dev_err(dev,
2553 "Unrecoverable Error detected in the adapter");
2554 dev_err(dev, "Please reboot server to recover");
2555 if (skyhawk_chip(adapter))
2556 adapter->hw_error = true;
2557 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2558 if (ue_lo & 1)
2559 dev_err(dev, "UE: %s bit set\n",
2560 ue_status_low_desc[i]);
2561 }
2562 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2563 if (ue_hi & 1)
2564 dev_err(dev, "UE: %s bit set\n",
2565 ue_status_hi_desc[i]);
2566 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05302567 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002568 }
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302569 if (error_detected)
2570 netif_carrier_off(netdev);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002571}
2572
Sathya Perla8d56ff12009-11-22 22:02:26 +00002573static void be_msix_disable(struct be_adapter *adapter)
2574{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002575 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002576 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002577 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302578 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002579 }
2580}
2581
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002582static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002583{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002584 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002585 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002586
Sathya Perla92bf14a2013-08-27 16:57:32 +05302587 /* If RoCE is supported, program the max number of NIC vectors that
2588 * may be configured via set-channels, along with vectors needed for
2589 * RoCe. Else, just program the number we'll use initially.
2590 */
2591 if (be_roce_supported(adapter))
2592 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2593 2 * num_online_cpus());
2594 else
2595 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002596
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002597 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002598 adapter->msix_entries[i].entry = i;
2599
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002600 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2601 MIN_MSIX_VECTORS, num_vec);
2602 if (num_vec < 0)
2603 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00002604
Sathya Perla92bf14a2013-08-27 16:57:32 +05302605 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2606 adapter->num_msix_roce_vec = num_vec / 2;
2607 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2608 adapter->num_msix_roce_vec);
2609 }
2610
2611 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2612
2613 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2614 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002615 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002616
2617fail:
2618 dev_warn(dev, "MSIx enable failed\n");
2619
2620 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2621 if (!be_physfn(adapter))
2622 return num_vec;
2623 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002624}
2625
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002626static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302627 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002628{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302629 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002630}
2631
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs already requested (in reverse order),
 * disables MSI-x, and returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: i currently indexes the EQ that failed; free the IRQs of
	 * EQs [0, i-1] walking backwards.
	 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2655
/* Register the adapter's interrupt handler(s).  Prefers MSI-x; a PF may
 * fall back to a shared INTx line on EQ0, while a VF must fail since INTx
 * is not supported for VFs.  Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2683
2684static void be_irq_unregister(struct be_adapter *adapter)
2685{
2686 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002687 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002688 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002689
2690 if (!adapter->isr_registered)
2691 return;
2692
2693 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002694 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002695 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002696 goto done;
2697 }
2698
2699 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002700 for_all_evt_queues(adapter, eqo, i)
2701 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002702
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002703done:
2704 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002705}
2706
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002707static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002708{
2709 struct be_queue_info *q;
2710 struct be_rx_obj *rxo;
2711 int i;
2712
2713 for_all_rx_queues(adapter, rxo, i) {
2714 q = &rxo->q;
2715 if (q->created) {
2716 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002717 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002718 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002719 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002720 }
2721}
2722
Sathya Perla889cd4b2010-05-30 23:33:45 +00002723static int be_close(struct net_device *netdev)
2724{
2725 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002726 struct be_eq_obj *eqo;
2727 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002728
Kalesh APe1ad8e32014-04-14 16:12:41 +05302729 /* This protection is needed as be_close() may be called even when the
2730 * adapter is in cleared state (after eeh perm failure)
2731 */
2732 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2733 return 0;
2734
Parav Pandit045508a2012-03-26 14:27:13 +00002735 be_roce_dev_close(adapter);
2736
Ivan Veceradff345c52013-11-27 08:59:32 +01002737 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2738 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00002739 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302740 be_disable_busy_poll(eqo);
2741 }
David S. Miller71237b62013-11-28 18:53:36 -05002742 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00002743 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002744
2745 be_async_mcc_disable(adapter);
2746
2747 /* Wait for all pending tx completions to arrive so that
2748 * all tx skbs are freed.
2749 */
Sathya Perlafba87552013-05-08 02:05:50 +00002750 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302751 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002752
2753 be_rx_qs_destroy(adapter);
2754
Ajit Khaparded11a3472013-11-18 10:44:37 -06002755 for (i = 1; i < (adapter->uc_macs + 1); i++)
2756 be_cmd_pmac_del(adapter, adapter->if_handle,
2757 adapter->pmac_id[i], 0);
2758 adapter->uc_macs = 0;
2759
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002760 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002761 if (msix_enabled(adapter))
2762 synchronize_irq(be_msix_vec_get(adapter, eqo));
2763 else
2764 synchronize_irq(netdev->irq);
2765 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002766 }
2767
Sathya Perla889cd4b2010-05-30 23:33:45 +00002768 be_irq_unregister(adapter);
2769
Sathya Perla482c9e72011-06-29 23:33:17 +00002770 return 0;
2771}
2772
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002773static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002774{
2775 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002776 int rc, i, j;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302777 u8 rss_hkey[RSS_HASH_KEY_LEN];
2778 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00002779
2780 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2782 sizeof(struct be_eth_rx_d));
2783 if (rc)
2784 return rc;
2785 }
2786
2787 /* The FW would like the default RXQ to be created first */
2788 rxo = default_rxo(adapter);
2789 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2790 adapter->if_handle, false, &rxo->rss_id);
2791 if (rc)
2792 return rc;
2793
2794 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002795 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002796 rx_frag_size, adapter->if_handle,
2797 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002798 if (rc)
2799 return rc;
2800 }
2801
2802 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302803 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2804 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002805 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302806 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002807 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302808 rss->rsstable[j + i] = rxo->rss_id;
2809 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002810 }
2811 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05302812 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2813 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00002814
2815 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05302816 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2817 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302818 } else {
2819 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05302820 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302821 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002822
Venkata Duvvurue2557872014-04-21 15:38:00 +05302823 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05302824 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Venkata Duvvurue2557872014-04-21 15:38:00 +05302825 128, rss_hkey);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302826 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302827 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302828 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002829 }
2830
Venkata Duvvurue2557872014-04-21 15:38:00 +05302831 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2832
Sathya Perla482c9e72011-06-29 23:33:17 +00002833 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002834 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002835 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002836 return 0;
2837}
2838
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002839static int be_open(struct net_device *netdev)
2840{
2841 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002842 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002843 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002844 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002845 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002846 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002847
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002848 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002849 if (status)
2850 goto err;
2851
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002852 status = be_irq_register(adapter);
2853 if (status)
2854 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002855
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002856 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002857 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002858
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002859 for_all_tx_queues(adapter, txo, i)
2860 be_cq_notify(adapter, txo->cq.id, true, 0);
2861
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002862 be_async_mcc_enable(adapter);
2863
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002864 for_all_evt_queues(adapter, eqo, i) {
2865 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302866 be_enable_busy_poll(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002867 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2868 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002869 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002870
Sathya Perla323ff712012-09-28 04:39:43 +00002871 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002872 if (!status)
2873 be_link_status_update(adapter, link_status);
2874
Sathya Perlafba87552013-05-08 02:05:50 +00002875 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002876 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05302877
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302878#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05302879 if (skyhawk_chip(adapter))
2880 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302881#endif
2882
Sathya Perla889cd4b2010-05-30 23:33:45 +00002883 return 0;
2884err:
2885 be_close(adapter->netdev);
2886 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002887}
2888
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002889static int be_setup_wol(struct be_adapter *adapter, bool enable)
2890{
2891 struct be_dma_mem cmd;
2892 int status = 0;
2893 u8 mac[ETH_ALEN];
2894
2895 memset(mac, 0, ETH_ALEN);
2896
2897 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002898 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2899 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002900 if (cmd.va == NULL)
2901 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002902
2903 if (enable) {
2904 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302905 PCICFG_PM_CONTROL_OFFSET,
2906 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002907 if (status) {
2908 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002909 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002910 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2911 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002912 return status;
2913 }
2914 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302915 adapter->netdev->dev_addr,
2916 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002917 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2918 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2919 } else {
2920 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2921 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2922 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2923 }
2924
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002925 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002926 return status;
2927}
2928
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002929/*
2930 * Generate a seed MAC address from the PF MAC Address using jhash.
2931 * MAC Address for VFs are assigned incrementally starting from the seed.
2932 * These addresses are programmed in the ASIC by the PF and the VF driver
2933 * queries for the MAC address during its probe.
2934 */
Sathya Perla4c876612013-02-03 20:30:11 +00002935static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002936{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002937 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002938 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002939 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002940 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002941
2942 be_vf_eth_addr_generate(adapter, mac);
2943
Sathya Perla11ac75e2011-12-13 00:58:50 +00002944 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302945 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002946 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002947 vf_cfg->if_handle,
2948 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302949 else
2950 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2951 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002952
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002953 if (status)
2954 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05302955 "Mac address assignment failed for VF %d\n",
2956 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002957 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002958 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002959
2960 mac[5] += 1;
2961 }
2962 return status;
2963}
2964
Sathya Perla4c876612013-02-03 20:30:11 +00002965static int be_vfs_mac_query(struct be_adapter *adapter)
2966{
2967 int status, vf;
2968 u8 mac[ETH_ALEN];
2969 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002970
2971 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302972 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2973 mac, vf_cfg->if_handle,
2974 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002975 if (status)
2976 return status;
2977 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2978 }
2979 return 0;
2980}
2981
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002982static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002983{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002984 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002985 u32 vf;
2986
Sathya Perla257a3fe2013-06-14 15:54:51 +05302987 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002988 dev_warn(&adapter->pdev->dev,
2989 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002990 goto done;
2991 }
2992
Sathya Perlab4c1df92013-05-08 02:05:47 +00002993 pci_disable_sriov(adapter->pdev);
2994
Sathya Perla11ac75e2011-12-13 00:58:50 +00002995 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302996 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002997 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2998 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302999 else
3000 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3001 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003002
Sathya Perla11ac75e2011-12-13 00:58:50 +00003003 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3004 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003005done:
3006 kfree(adapter->vf_cfg);
3007 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003008}
3009
Sathya Perla77071332013-08-27 16:57:34 +05303010static void be_clear_queues(struct be_adapter *adapter)
3011{
3012 be_mcc_queues_destroy(adapter);
3013 be_rx_cqs_destroy(adapter);
3014 be_tx_queues_destroy(adapter);
3015 be_evt_queues_destroy(adapter);
3016}
3017
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303018static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003019{
Sathya Perla191eb752012-02-23 18:50:13 +00003020 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3021 cancel_delayed_work_sync(&adapter->work);
3022 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3023 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303024}
3025
Somnath Koturb05004a2013-12-05 12:08:16 +05303026static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303027{
3028 int i;
3029
Somnath Koturb05004a2013-12-05 12:08:16 +05303030 if (adapter->pmac_id) {
3031 for (i = 0; i < (adapter->uc_macs + 1); i++)
3032 be_cmd_pmac_del(adapter, adapter->if_handle,
3033 adapter->pmac_id[i], 0);
3034 adapter->uc_macs = 0;
3035
3036 kfree(adapter->pmac_id);
3037 adapter->pmac_id = NULL;
3038 }
3039}
3040
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303041#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303042static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3043{
3044 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3045 be_cmd_manage_iface(adapter, adapter->if_handle,
3046 OP_CONVERT_TUNNEL_TO_NORMAL);
3047
3048 if (adapter->vxlan_port)
3049 be_cmd_set_vxlan_port(adapter, 0);
3050
3051 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3052 adapter->vxlan_port = 0;
3053}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303054#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303055
Somnath Koturb05004a2013-12-05 12:08:16 +05303056static int be_clear(struct be_adapter *adapter)
3057{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303058 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003059
Sathya Perla11ac75e2011-12-13 00:58:50 +00003060 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003061 be_vf_clear(adapter);
3062
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303063#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303064 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303065#endif
Sathya Perla2d17f402013-07-23 15:25:04 +05303066 /* delete the primary mac along with the uc-mac list */
Somnath Koturb05004a2013-12-05 12:08:16 +05303067 be_mac_clear(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003068
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003069 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003070
Sathya Perla77071332013-08-27 16:57:34 +05303071 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003072
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003073 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303074 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003075 return 0;
3076}
3077
Sathya Perla4c876612013-02-03 20:30:11 +00003078static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003079{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303080 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003081 struct be_vf_cfg *vf_cfg;
3082 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003083 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003084
Sathya Perla4c876612013-02-03 20:30:11 +00003085 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3086 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003087
Sathya Perla4c876612013-02-03 20:30:11 +00003088 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303089 if (!BE3_chip(adapter)) {
3090 status = be_cmd_get_profile_config(adapter, &res,
3091 vf + 1);
3092 if (!status)
3093 cap_flags = res.if_cap_flags;
3094 }
Sathya Perla4c876612013-02-03 20:30:11 +00003095
3096 /* If a FW profile exists, then cap_flags are updated */
3097 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
Sathya Perla748b5392014-05-09 13:29:13 +05303098 BE_IF_FLAGS_BROADCAST |
3099 BE_IF_FLAGS_MULTICAST);
3100 status =
3101 be_cmd_if_create(adapter, cap_flags, en_flags,
3102 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003103 if (status)
3104 goto err;
3105 }
3106err:
3107 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003108}
3109
Sathya Perla39f1d942012-05-08 19:41:24 +00003110static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003111{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003112 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003113 int vf;
3114
Sathya Perla39f1d942012-05-08 19:41:24 +00003115 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3116 GFP_KERNEL);
3117 if (!adapter->vf_cfg)
3118 return -ENOMEM;
3119
Sathya Perla11ac75e2011-12-13 00:58:50 +00003120 for_all_vfs(adapter, vf_cfg, vf) {
3121 vf_cfg->if_handle = -1;
3122 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003123 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003124 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003125}
3126
/* Bring up SR-IOV VFs: create/query per-VF interfaces, program MACs,
 * grant filtering privileges and finally enable SR-IOV at the PCI level.
 *
 * Two entry conditions are handled:
 *  - VFs already enabled in the PCI core (e.g. after a PF reset): reuse the
 *    existing VF count and *query* per-VF state instead of re-creating it.
 *  - Fresh setup: honour the num_vfs module parameter, capped at the
 *    device-reported maximum.
 *
 * Returns 0 on success (including the "no VFs requested" case) or a
 * negative/FW status; on failure all partially-created VF state is undone
 * via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs survive from before (PF reset); num_vfs param cannot
		 * change the count without disabling SR-IOV first.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Existing VFs: look up their if_ids; new VFs: create interfaces */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Existing VFs: read back their MACs; new VFs: assign MACs */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_config_qos(adapter, 1000, vf + 1);

		/* Cache link speed as the VF's initial tx_rate; failure here
		 * is non-fatal (tx_rate simply stays unset).
		 */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	/* Enable SR-IOV in the PCI core last, after all per-VF FW state is
	 * in place, so the VFs come up fully configured.
	 */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3222
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303223/* Converting function_mode bits on BE3 to SH mc_type enums */
3224
3225static u8 be_convert_mc_type(u32 function_mode)
3226{
3227 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3228 return vNIC1;
3229 else if (function_mode & FLEX10_MODE)
3230 return FLEX10;
3231 else if (function_mode & VNIC_MODE)
3232 return vNIC2;
3233 else if (function_mode & UMC_ENABLED)
3234 return UMC;
3235 else
3236 return MC_NONE;
3237}
3238
/* On BE2/BE3 FW does not suggest the supported limits; derive them here
 * from chip type, multi-channel mode and SR-IOV intent and fill *res.
 * NOTE(review): res->max_rss_qs is only assigned when RSS is usable on a
 * non-SRIOV PF — presumably callers pass a zero-initialized *res; confirm.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs = 0;

	if (be_physfn(adapter) && BE3_chip(adapter)) {
		be_cmd_get_profile_config(adapter, res, 0);
		/* Some old versions of BE3 FW don't report max_vfs value */
		if (res->max_vfs == 0) {
			max_vfs = pci_sriov_get_totalvfs(pdev);
			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		}
		use_sriov = res->max_vfs && sriov_want(adapter);
	}

	/* PFs get the larger unicast-MAC filter budget */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for a non-SRIOV, RSS-capable PF */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	/* Fewer event queues per PF when SR-IOV may carve up resources */
	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3308
Sathya Perla30128032011-11-10 19:17:57 +00003309static void be_setup_init(struct be_adapter *adapter)
3310{
3311 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003312 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003313 adapter->if_handle = -1;
3314 adapter->be3_native = false;
3315 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003316 if (be_physfn(adapter))
3317 adapter->cmd_privileges = MAX_PRIVILEGES;
3318 else
3319 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003320}
3321
/* Populate adapter->res with per-function resource limits.
 * BE2/BE3: limits are derived locally (FW does not report them).
 * Lancer/Skyhawk: limits are queried from FW.
 * Returns 0 on success or the FW command status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the PF sees the pool-wide VF limit */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3365
/* Routine to query per function resource limits.
 * Also reads the FW config (port number, function mode/caps, ASIC rev),
 * reports the active profile on PFs, allocates the pmac_id table sized by
 * the unicast-MAC limit, and clamps cfg_num_qs to the HW maximum.
 * Returns 0 on success, -ENOMEM on allocation failure, or a FW status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* Active-profile query failure is non-fatal; only the log is lost */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC filter */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3400
Sathya Perla95046b92013-07-23 15:25:02 +05303401static int be_mac_setup(struct be_adapter *adapter)
3402{
3403 u8 mac[ETH_ALEN];
3404 int status;
3405
3406 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3407 status = be_cmd_get_perm_mac(adapter, mac);
3408 if (status)
3409 return status;
3410
3411 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3412 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3413 } else {
3414 /* Maybe the HW was reset; dev_addr must be re-programmed */
3415 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3416 }
3417
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003418 /* For BE3-R VFs, the PF programs the initial MAC address */
3419 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3420 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3421 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303422 return 0;
3423}
3424
/* Kick off the periodic housekeeping worker (1s delay) and record that it
 * is scheduled so teardown paths know to cancel it.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3430
/* Create all adapter queues in dependency order (EQs first, then TX, RX
 * CQs and MCC) and publish the real RX/TX queue counts to the net stack.
 * Caller must hold rtnl_lock for the netif_set_real_num_*() calls.
 * Returns 0 or the first failing step's status (no unwind here; callers
 * clean up via the err path).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3465
/* Tear down and re-create all queues (e.g. after a queue-count change),
 * re-programming the MSI-x table only when no vectors are shared with RoCE.
 * Quiesces the interface and the worker first, and restores both if the
 * netdev was running. Caller must hold rtnl_lock (be_close/be_open and
 * be_setup_queues require it). Returns 0 or the first failure status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3501
/* Full adapter bring-up: query FW config/limits, enable MSI-x, create the
 * interface and queues, program MAC/VLAN/rx-mode/flow-control, optionally
 * set up SR-IOV, and start the housekeeping worker.
 * The step order matters: config must be known before MSI-x sizing, the
 * interface must exist before queues, and queues before MAC programming.
 * Returns 0 on success; on failure everything done so far is undone via
 * be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Request only the capability flags this function actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Old BE2 firmware has known interrupt problems; warn but continue */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Only push flow-control settings if they differ from FW's current */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* VF setup failure is deliberately non-fatal to PF bring-up */
	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3587
Ivan Vecera66268732011-12-08 01:31:21 +00003588#ifdef CONFIG_NET_POLL_CONTROLLER
3589static void be_netpoll(struct net_device *netdev)
3590{
3591 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003592 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003593 int i;
3594
Sathya Perlae49cc342012-11-27 19:50:02 +00003595 for_all_evt_queues(adapter, eqo, i) {
3596 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3597 napi_schedule(&eqo->napi);
3598 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003599
3600 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003601}
3602#endif
3603
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* 32-byte magic that marks a flash section directory inside a UFI image;
 * compared against flash_section_info.cookie in get_fsec_info().
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003606
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003607static bool be_flash_redboot(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303608 const u8 *p, u32 img_start, int image_size,
3609 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003610{
3611 u32 crc_offset;
3612 u8 flashed_crc[4];
3613 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003614
3615 crc_offset = hdr_size + img_start + image_size - 4;
3616
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003617 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003618
Sathya Perla748b5392014-05-09 13:29:13 +05303619 status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003620 if (status) {
3621 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303622 "could not get crc from flash, not flashing redboot\n");
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003623 return false;
3624 }
3625
3626 /*update redboot only if crc does not match*/
3627 if (!memcmp(flashed_crc, p, 4))
3628 return false;
3629 else
3630 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003631}
3632
Sathya Perla306f1342011-08-02 19:57:45 +00003633static bool phy_flashing_required(struct be_adapter *adapter)
3634{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003635 return (adapter->phy.phy_type == TN_8022 &&
3636 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003637}
3638
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003639static bool is_comp_in_ufi(struct be_adapter *adapter,
3640 struct flash_section_info *fsec, int type)
3641{
3642 int i = 0, img_type = 0;
3643 struct flash_section_info_g2 *fsec_g2 = NULL;
3644
Sathya Perlaca34fe32012-11-06 17:48:56 +00003645 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003646 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3647
3648 for (i = 0; i < MAX_FLASH_COMP; i++) {
3649 if (fsec_g2)
3650 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3651 else
3652 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3653
3654 if (img_type == type)
3655 return true;
3656 }
3657 return false;
3658
3659}
3660
Jingoo Han4188e7d2013-08-05 18:02:02 +09003661static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303662 int header_size,
3663 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003664{
3665 struct flash_section_info *fsec = NULL;
3666 const u8 *p = fw->data;
3667
3668 p += header_size;
3669 while (p < (fw->data + fw->size)) {
3670 fsec = (struct flash_section_info *)p;
3671 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3672 return fsec;
3673 p += 32;
3674 }
3675 return NULL;
3676}
3677
/* Write one firmware image region to flash in 32KB chunks through the
 * shared DMA command buffer. Intermediate chunks use the SAVE opcode;
 * the final chunk uses the FLASH opcode, which commits the region.
 * PHY firmware uses its own opcode pair, and an ILLEGAL_IOCTL_REQ for
 * PHY FW is treated as "not supported" rather than an error.
 * Returns 0 on success or the FW command status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk: commit (FLASH); otherwise accumulate (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			/* PHY flashing unsupported by this FW: not an error */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3718
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003719/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003720static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303721 const struct firmware *fw,
3722 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003723{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003724 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003725 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003726 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003727 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003728 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003729 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003730
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003731 struct flash_comp gen3_flash_types[] = {
3732 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3733 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3734 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3735 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3736 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3737 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3738 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3739 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3740 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3741 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3742 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3743 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3744 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3745 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3746 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3747 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3748 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3749 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3750 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3751 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003752 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003753
3754 struct flash_comp gen2_flash_types[] = {
3755 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3756 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3757 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3758 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3759 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3760 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3761 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3762 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3763 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3764 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3765 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3766 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3767 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3768 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3769 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3770 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003771 };
3772
Sathya Perlaca34fe32012-11-06 17:48:56 +00003773 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003774 pflashcomp = gen3_flash_types;
3775 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003776 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003777 } else {
3778 pflashcomp = gen2_flash_types;
3779 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003780 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003781 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003782
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003783 /* Get flash section info*/
3784 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3785 if (!fsec) {
3786 dev_err(&adapter->pdev->dev,
3787 "Invalid Cookie. UFI corrupted ?\n");
3788 return -1;
3789 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003790 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003791 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003792 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003793
3794 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3795 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3796 continue;
3797
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003798 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3799 !phy_flashing_required(adapter))
3800 continue;
3801
3802 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3803 redboot = be_flash_redboot(adapter, fw->data,
Sathya Perla748b5392014-05-09 13:29:13 +05303804 pflashcomp[i].offset,
3805 pflashcomp[i].size,
3806 filehdr_size +
3807 img_hdrs_size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003808 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003809 continue;
3810 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003811
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003812 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003813 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003814 if (p + pflashcomp[i].size > fw->data + fw->size)
3815 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003816
3817 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303818 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003819 if (status) {
3820 dev_err(&adapter->pdev->dev,
3821 "Flashing section type %d failed.\n",
3822 pflashcomp[i].img_type);
3823 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003824 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003825 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003826 return 0;
3827}
3828
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003829static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303830 const struct firmware *fw,
3831 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003832{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003833 int status = 0, i, filehdr_size = 0;
3834 int img_offset, img_size, img_optype, redboot;
3835 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3836 const u8 *p = fw->data;
3837 struct flash_section_info *fsec = NULL;
3838
3839 filehdr_size = sizeof(struct flash_file_hdr_g3);
3840 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3841 if (!fsec) {
3842 dev_err(&adapter->pdev->dev,
3843 "Invalid Cookie. UFI corrupted ?\n");
3844 return -1;
3845 }
3846
3847 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3848 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3849 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3850
3851 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3852 case IMAGE_FIRMWARE_iSCSI:
3853 img_optype = OPTYPE_ISCSI_ACTIVE;
3854 break;
3855 case IMAGE_BOOT_CODE:
3856 img_optype = OPTYPE_REDBOOT;
3857 break;
3858 case IMAGE_OPTION_ROM_ISCSI:
3859 img_optype = OPTYPE_BIOS;
3860 break;
3861 case IMAGE_OPTION_ROM_PXE:
3862 img_optype = OPTYPE_PXE_BIOS;
3863 break;
3864 case IMAGE_OPTION_ROM_FCoE:
3865 img_optype = OPTYPE_FCOE_BIOS;
3866 break;
3867 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3868 img_optype = OPTYPE_ISCSI_BACKUP;
3869 break;
3870 case IMAGE_NCSI:
3871 img_optype = OPTYPE_NCSI_FW;
3872 break;
3873 default:
3874 continue;
3875 }
3876
3877 if (img_optype == OPTYPE_REDBOOT) {
3878 redboot = be_flash_redboot(adapter, fw->data,
Sathya Perla748b5392014-05-09 13:29:13 +05303879 img_offset, img_size,
3880 filehdr_size +
3881 img_hdrs_size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003882 if (!redboot)
3883 continue;
3884 }
3885
3886 p = fw->data;
3887 p += filehdr_size + img_offset + img_hdrs_size;
3888 if (p + img_size > fw->data + fw->size)
3889 return -1;
3890
3891 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3892 if (status) {
3893 dev_err(&adapter->pdev->dev,
3894 "Flashing section type %d failed.\n",
3895 fsec->fsec_entry[i].type);
3896 return status;
3897 }
3898 }
3899 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003900}
3901
/* Downloads a firmware image to a Lancer adapter's flash.
 *
 * The image is streamed to the FW in 32KB chunks via WRITE_OBJECT commands,
 * then committed with a zero-length write. Depending on the FW's response
 * the adapter is either reset in-place to activate the new image, or the
 * user is told a reboot is required.
 *
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose length is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header followed by the chunk
	 * payload; it is reused for every chunk.
	 */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by how much the FW actually consumed
	 * (data_written), which may be less than chunk_size.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset signals end-of-image to the FW.
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* FW told us whether (and how) the new image must be activated */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3998
Sathya Perlaca34fe32012-11-06 17:48:56 +00003999#define UFI_TYPE2 2
4000#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004001#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004002#define UFI_TYPE4 4
4003static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004004 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004005{
4006 if (fhdr == NULL)
4007 goto be_get_ufi_exit;
4008
Sathya Perlaca34fe32012-11-06 17:48:56 +00004009 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4010 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004011 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4012 if (fhdr->asic_type_rev == 0x10)
4013 return UFI_TYPE3R;
4014 else
4015 return UFI_TYPE3;
4016 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004017 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004018
4019be_get_ufi_exit:
4020 dev_err(&adapter->pdev->dev,
4021 "UFI and Interface are not compatible for flashing\n");
4022 return -1;
4023}
4024
/* Flashes a (non-Lancer) UFI firmware file to the adapter.
 *
 * Determines the UFI flavor from the file header, then walks the image
 * headers and flashes the matching sections through the generation-specific
 * helper (be_flash_skyhawk / be_flash_BEx). Type-2 UFIs have no per-image
 * dispatch and are flashed after the loop.
 *
 * Returns 0 on success, a negative errno, or -1 on incompatibility.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* Single reusable DMA buffer for the WRITE_FLASHROM commands */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* May return -1; that case is handled after the loop below */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* imageid == 1 marks the flashable payload image */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Legacy type-2 UFIs carry no dispatchable image headers */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4093
4094int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4095{
4096 const struct firmware *fw;
4097 int status;
4098
4099 if (!netif_running(adapter->netdev)) {
4100 dev_err(&adapter->pdev->dev,
4101 "Firmware load not allowed (interface is down)\n");
4102 return -1;
4103 }
4104
4105 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4106 if (status)
4107 goto fw_exit;
4108
4109 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4110
4111 if (lancer_chip(adapter))
4112 status = lancer_fw_download(adapter, fw);
4113 else
4114 status = be_fw_download(adapter, fw);
4115
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004116 if (!status)
4117 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4118 adapter->fw_on_flash);
4119
Ajit Khaparde84517482009-09-04 03:12:16 +00004120fw_exit:
4121 release_firmware(fw);
4122 return status;
4123}
4124
Sathya Perla748b5392014-05-09 13:29:13 +05304125static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004126{
4127 struct be_adapter *adapter = netdev_priv(dev);
4128 struct nlattr *attr, *br_spec;
4129 int rem;
4130 int status = 0;
4131 u16 mode = 0;
4132
4133 if (!sriov_enabled(adapter))
4134 return -EOPNOTSUPP;
4135
4136 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4137
4138 nla_for_each_nested(attr, br_spec, rem) {
4139 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4140 continue;
4141
4142 mode = nla_get_u16(attr);
4143 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4144 return -EINVAL;
4145
4146 status = be_cmd_set_hsw_config(adapter, 0, 0,
4147 adapter->if_handle,
4148 mode == BRIDGE_MODE_VEPA ?
4149 PORT_FWD_TYPE_VEPA :
4150 PORT_FWD_TYPE_VEB);
4151 if (status)
4152 goto err;
4153
4154 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4155 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4156
4157 return status;
4158 }
4159err:
4160 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4161 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4162
4163 return status;
4164}
4165
4166static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304167 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004168{
4169 struct be_adapter *adapter = netdev_priv(dev);
4170 int status = 0;
4171 u8 hsw_mode;
4172
4173 if (!sriov_enabled(adapter))
4174 return 0;
4175
4176 /* BE and Lancer chips support VEB mode only */
4177 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4178 hsw_mode = PORT_FWD_TYPE_VEB;
4179 } else {
4180 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4181 adapter->if_handle, &hsw_mode);
4182 if (status)
4183 return 0;
4184 }
4185
4186 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4187 hsw_mode == PORT_FWD_TYPE_VEPA ?
4188 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4189}
4190
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304191#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304192static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4193 __be16 port)
4194{
4195 struct be_adapter *adapter = netdev_priv(netdev);
4196 struct device *dev = &adapter->pdev->dev;
4197 int status;
4198
4199 if (lancer_chip(adapter) || BEx_chip(adapter))
4200 return;
4201
4202 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4203 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4204 be16_to_cpu(port));
4205 dev_info(dev,
4206 "Only one UDP port supported for VxLAN offloads\n");
4207 return;
4208 }
4209
4210 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4211 OP_CONVERT_NORMAL_TO_TUNNEL);
4212 if (status) {
4213 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4214 goto err;
4215 }
4216
4217 status = be_cmd_set_vxlan_port(adapter, port);
4218 if (status) {
4219 dev_warn(dev, "Failed to add VxLAN port\n");
4220 goto err;
4221 }
4222 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4223 adapter->vxlan_port = port;
4224
4225 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4226 be16_to_cpu(port));
4227 return;
4228err:
4229 be_disable_vxlan_offloads(adapter);
4230 return;
4231}
4232
4233static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4234 __be16 port)
4235{
4236 struct be_adapter *adapter = netdev_priv(netdev);
4237
4238 if (lancer_chip(adapter) || BEx_chip(adapter))
4239 return;
4240
4241 if (adapter->vxlan_port != port)
4242 return;
4243
4244 be_disable_vxlan_offloads(adapter);
4245
4246 dev_info(&adapter->pdev->dev,
4247 "Disabled VxLAN offloads for UDP port %d\n",
4248 be16_to_cpu(port));
4249}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304250#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304251
/* NDO dispatch table for be2net interfaces; installed by be_netdev_init().
 * The ndo_set_vf_* hooks manage SR-IOV VFs and are meaningful on PFs only.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4281
/* One-time netdev setup: advertises offload feature flags and hooks up
 * the NDO and ethtool ops before the device is registered.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk additionally offloads csum/TSO on VxLAN-encapsulated
	 * (inner) traffic.
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything user-togglable is enabled by default; VLAN RX/filter
	 * are enabled but kept out of hw_features (not user-togglable).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so a segment plus the Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4314
4315static void be_unmap_pci_bars(struct be_adapter *adapter)
4316{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004317 if (adapter->csr)
4318 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004319 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004320 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004321}
4322
/* Returns the PCI BAR number that exposes the doorbell region:
 * BAR 0 on Lancer chips and on VFs, BAR 4 on BE PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4330
4331static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004332{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004333 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004334 adapter->roce_db.size = 4096;
4335 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4336 db_bar(adapter));
4337 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4338 db_bar(adapter));
4339 }
Parav Pandit045508a2012-03-26 14:27:13 +00004340 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004341}
4342
4343static int be_map_pci_bars(struct be_adapter *adapter)
4344{
4345 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004346
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004347 if (BEx_chip(adapter) && be_physfn(adapter)) {
4348 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4349 if (adapter->csr == NULL)
4350 return -ENOMEM;
4351 }
4352
Sathya Perlace66f782012-11-06 17:48:58 +00004353 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004354 if (addr == NULL)
4355 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004356 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004357
4358 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004359 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004360
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004361pci_map_err:
4362 be_unmap_pci_bars(adapter);
4363 return -ENOMEM;
4364}
4365
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004366static void be_ctrl_cleanup(struct be_adapter *adapter)
4367{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004368 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004369
4370 be_unmap_pci_bars(adapter);
4371
4372 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004373 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4374 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004375
Sathya Perla5b8821b2011-08-02 19:57:44 +00004376 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004377 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004378 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4379 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004380}
4381
/* Initializes the control path: decodes the SLI interface register, maps
 * the PCI BARs, allocates the FW mailbox and rx-filter DMA buffers, and
 * initializes the command locks. On failure, already-acquired resources
 * are unwound via the goto chain. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Read ASIC family and PF/VF function type from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox proper can be aligned
	 * to a 16-byte boundary (mbox_mem_align below).
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Zeroed DMA buffer reused for all RX_FILTER commands */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved state is restored on EEH / error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4440
4441static void be_stats_cleanup(struct be_adapter *adapter)
4442{
Sathya Perla3abcded2010-10-03 22:12:27 -07004443 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004444
4445 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004446 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4447 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004448}
4449
4450static int be_stats_init(struct be_adapter *adapter)
4451{
Sathya Perla3abcded2010-10-03 22:12:27 -07004452 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004453
Sathya Perlaca34fe32012-11-06 17:48:56 +00004454 if (lancer_chip(adapter))
4455 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4456 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004457 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004458 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004459 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004460 else
4461 /* ALL non-BE ASICs */
4462 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004463
Joe Perchesede23fa82013-08-26 22:45:23 -07004464 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4465 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004466 if (cmd->va == NULL)
4467 return -1;
4468 return 0;
4469}
4470
/* PCI remove callback: tears the adapter down in the reverse order of
 * probe — RoCE first, interrupts off, recovery worker stopped, netdev
 * unregistered, then queues/FW/DMA/PCI resources released.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Must not race with the error-recovery worker during teardown */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4501
Sathya Perla39f1d942012-05-08 19:41:24 +00004502static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004503{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304504 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004505
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004506 status = be_cmd_get_cntl_attributes(adapter);
4507 if (status)
4508 return status;
4509
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004510 /* Must be a power of 2 or else MODULO will BUG_ON */
4511 adapter->be_get_temp_freq = 64;
4512
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304513 if (BEx_chip(adapter)) {
4514 level = be_cmd_get_fw_log_level(adapter);
4515 adapter->msg_enable =
4516 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4517 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004518
Sathya Perla92bf14a2013-08-27 16:57:32 +05304519 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004520 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004521}
4522
/* Attempts to recover a Lancer adapter after a HW/FW error: waits for the
 * FW to report ready, tears down and rebuilds the whole data path, and
 * reopens the interface if it was up. Returns 0 on success; -EAGAIN means
 * FW resources are not yet provisioned and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear sticky error state before re-initializing */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): success message is logged at error level —
	 * dev_info would seem more appropriate; confirm intent.
	 */
	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4559
/* Periodic (1s) error-detection worker. On a Lancer HW error it detaches
 * the netdev and runs the recovery sequence; the work re-arms itself
 * unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Keep the stack from transmitting while we recover */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4586
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down; while up, it refreshes HW stats, samples the die
 * temperature (PF only, every be_get_temp_freq ticks), replenishes
 * starved RX queues and updates adaptive EQ delays. Always re-arms.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Don't fire a new stats request while one is still outstanding */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4629
Sathya Perla257a3fe2013-06-14 15:54:51 +05304630/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004631static bool be_reset_required(struct be_adapter *adapter)
4632{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304633 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004634}
4635
Sathya Perlad3791422012-09-28 04:39:44 +00004636static char *mc_name(struct be_adapter *adapter)
4637{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304638 char *str = ""; /* default */
4639
4640 switch (adapter->mc_type) {
4641 case UMC:
4642 str = "UMC";
4643 break;
4644 case FLEX10:
4645 str = "FLEX10";
4646 break;
4647 case vNIC1:
4648 str = "vNIC-1";
4649 break;
4650 case nPAR:
4651 str = "nPAR";
4652 break;
4653 case UFP:
4654 str = "UFP";
4655 break;
4656 case vNIC2:
4657 str = "vNIC-2";
4658 break;
4659 default:
4660 str = "";
4661 }
4662
4663 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004664}
4665
/* Printable PCI function type: physical ("PF") or virtual ("VF"). */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4670
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004671static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004672{
4673 int status = 0;
4674 struct be_adapter *adapter;
4675 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004676 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004677
4678 status = pci_enable_device(pdev);
4679 if (status)
4680 goto do_none;
4681
4682 status = pci_request_regions(pdev, DRV_NAME);
4683 if (status)
4684 goto disable_dev;
4685 pci_set_master(pdev);
4686
Sathya Perla7f640062012-06-05 19:37:20 +00004687 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004688 if (netdev == NULL) {
4689 status = -ENOMEM;
4690 goto rel_reg;
4691 }
4692 adapter = netdev_priv(netdev);
4693 adapter->pdev = pdev;
4694 pci_set_drvdata(pdev, adapter);
4695 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004696 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004697
Russell King4c15c242013-06-26 23:49:11 +01004698 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004699 if (!status) {
4700 netdev->features |= NETIF_F_HIGHDMA;
4701 } else {
Russell King4c15c242013-06-26 23:49:11 +01004702 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004703 if (status) {
4704 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4705 goto free_netdev;
4706 }
4707 }
4708
Ajit Khapardeea58c182013-10-18 16:06:24 -05004709 if (be_physfn(adapter)) {
4710 status = pci_enable_pcie_error_reporting(pdev);
4711 if (!status)
4712 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4713 }
Sathya Perlad6b6d982012-09-05 01:56:48 +00004714
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004715 status = be_ctrl_init(adapter);
4716 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004717 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004718
Sathya Perla2243e2e2009-11-22 22:02:03 +00004719 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004720 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004721 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004722 if (status)
4723 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004724 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004725
Sathya Perla39f1d942012-05-08 19:41:24 +00004726 if (be_reset_required(adapter)) {
4727 status = be_cmd_reset_function(adapter);
4728 if (status)
4729 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004730
Kalesh AP2d177be2013-04-28 22:22:29 +00004731 /* Wait for interrupts to quiesce after an FLR */
4732 msleep(100);
4733 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004734
4735 /* Allow interrupts for other ULPs running on NIC function */
4736 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004737
Kalesh AP2d177be2013-04-28 22:22:29 +00004738 /* tell fw we're ready to fire cmds */
4739 status = be_cmd_fw_init(adapter);
4740 if (status)
4741 goto ctrl_clean;
4742
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004743 status = be_stats_init(adapter);
4744 if (status)
4745 goto ctrl_clean;
4746
Sathya Perla39f1d942012-05-08 19:41:24 +00004747 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004748 if (status)
4749 goto stats_clean;
4750
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004751 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004752 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004753 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004754
Sathya Perla5fb379e2009-06-18 00:02:59 +00004755 status = be_setup(adapter);
4756 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004757 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004758
Sathya Perla3abcded2010-10-03 22:12:27 -07004759 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004760 status = register_netdev(netdev);
4761 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004762 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004763
Parav Pandit045508a2012-03-26 14:27:13 +00004764 be_roce_dev_add(adapter);
4765
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004766 schedule_delayed_work(&adapter->func_recovery_work,
4767 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004768
4769 be_cmd_query_port_name(adapter, &port_name);
4770
Sathya Perlad3791422012-09-28 04:39:44 +00004771 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4772 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004773
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004774 return 0;
4775
Sathya Perla5fb379e2009-06-18 00:02:59 +00004776unsetup:
4777 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004778stats_clean:
4779 be_stats_cleanup(adapter);
4780ctrl_clean:
4781 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004782free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004783 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004784rel_reg:
4785 pci_release_regions(pdev);
4786disable_dev:
4787 pci_disable_device(pdev);
4788do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004789 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004790 return status;
4791}
4792
/* Legacy PM suspend handler: quiesces the function before the system
 * sleeps.  Teardown order matters: arm wake-on-LAN first, mask device
 * interrupts, stop the recovery worker, detach/close the netdev, then
 * tear down queues before powering the PCI device down.
 * Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Program the FW for wake-on-LAN only if the user enabled it */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4817
4818static int be_resume(struct pci_dev *pdev)
4819{
4820 int status = 0;
4821 struct be_adapter *adapter = pci_get_drvdata(pdev);
4822 struct net_device *netdev = adapter->netdev;
4823
4824 netif_device_detach(netdev);
4825
4826 status = pci_enable_device(pdev);
4827 if (status)
4828 return status;
4829
Yijing Wang1ca01512013-06-27 20:53:42 +08004830 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004831 pci_restore_state(pdev);
4832
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304833 status = be_fw_wait_ready(adapter);
4834 if (status)
4835 return status;
4836
Ajit Khaparded4360d62013-11-22 12:51:09 -06004837 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00004838 /* tell fw we're ready to fire cmds */
4839 status = be_cmd_fw_init(adapter);
4840 if (status)
4841 return status;
4842
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004843 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004844 if (netif_running(netdev)) {
4845 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004846 be_open(netdev);
4847 rtnl_unlock();
4848 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004849
4850 schedule_delayed_work(&adapter->func_recovery_work,
4851 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004852 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004853
Suresh Reddy76a9e082014-01-15 13:23:40 +05304854 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004855 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004856
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004857 return 0;
4858}
4859
Sathya Perla82456b02010-02-17 01:35:37 +00004860/*
4861 * An FLR will stop BE from DMAing any data.
4862 */
4863static void be_shutdown(struct pci_dev *pdev)
4864{
4865 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004866
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004867 if (!adapter)
4868 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004869
Sathya Perla0f4a6822011-03-21 20:49:28 +00004870 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004871 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004872
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004873 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004874
Ajit Khaparde57841862011-04-06 18:08:43 +00004875 be_cmd_reset_function(adapter);
4876
Sathya Perla82456b02010-02-17 01:35:37 +00004877 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004878}
4879
Sathya Perlacf588472010-02-14 21:22:01 +00004880static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05304881 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00004882{
4883 struct be_adapter *adapter = pci_get_drvdata(pdev);
4884 struct net_device *netdev = adapter->netdev;
4885
4886 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4887
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004888 if (!adapter->eeh_error) {
4889 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004890
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004891 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004892
Sathya Perlacf588472010-02-14 21:22:01 +00004893 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004894 netif_device_detach(netdev);
4895 if (netif_running(netdev))
4896 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004897 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004898
4899 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004900 }
Sathya Perlacf588472010-02-14 21:22:01 +00004901
4902 if (state == pci_channel_io_perm_failure)
4903 return PCI_ERS_RESULT_DISCONNECT;
4904
4905 pci_disable_device(pdev);
4906
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004907 /* The error could cause the FW to trigger a flash debug dump.
4908 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004909 * can cause it not to recover; wait for it to finish.
4910 * Wait only for first function as it is needed only once per
4911 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004912 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004913 if (pdev->devfn == 0)
4914 ssleep(30);
4915
Sathya Perlacf588472010-02-14 21:22:01 +00004916 return PCI_ERS_RESULT_NEED_RESET;
4917}
4918
/* EEH/AER slot_reset callback: runs after the platform has reset the
 * slot.  Re-enables the device, waits for FW readiness and clears the
 * recorded error state.  Returns RECOVERED on success, DISCONNECT if
 * the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear latched AER status, then our own error flags */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4945
/* EEH/AER resume callback: final recovery step after a successful slot
 * reset.  Re-initializes the function (mirrors the probe-time bring-up)
 * and re-attaches the netdev; on any failure it only logs, as there is
 * no further recovery path from here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic FW-error recovery task */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4988
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004989static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004990 .error_detected = be_eeh_err_detected,
4991 .slot_reset = be_eeh_reset,
4992 .resume = be_eeh_resume,
4993};
4994
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004995static struct pci_driver be_driver = {
4996 .name = DRV_NAME,
4997 .id_table = be_dev_ids,
4998 .probe = be_probe,
4999 .remove = be_remove,
5000 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00005001 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005002 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00005003 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005004};
5005
5006static int __init be_init_module(void)
5007{
Joe Perches8e95a202009-12-03 07:58:21 +00005008 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5009 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005010 printk(KERN_WARNING DRV_NAME
5011 " : Module param rx_frag_size must be 2048/4096/8192."
5012 " Using 2048\n");
5013 rx_frag_size = 2048;
5014 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005015
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005016 return pci_register_driver(&be_driver);
5017}
5018module_init(be_init_module);
5019
/* Module exit point: unregisters the PCI driver, which triggers
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);