blob: a3c6a27d13fa59f29563e61602a47a1fd1863f0b [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Sathya Perla6b7c5b92009-03-11 23:32:03 -070042static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the Unrecoverable Error status bits.
 * NOTE(review): assumed array index == bit number within the CSR -- the
 * ordering suggests so, but confirm against the reporting code/HW spec.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Same mapping for the high half of the UE status; unused/reserved bits
 * report as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
137 u16 len, u16 entry_size)
138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157 &reg);
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209 bool arm, bool clear_int, u16 num_popped)
210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
214 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000215
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000216 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000217 return;
218
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219 if (arm)
220 val |= 1 << DB_EQ_REARM_SHIFT;
221 if (clear_int)
222 val |= 1 << DB_EQ_CLR_SHIFT;
223 val |= 1 << DB_EQ_EVNT_SHIFT;
224 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000225 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226}
227
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229{
230 u32 val = 0;
231 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000232 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
233 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000234
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000235 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000236 return;
237
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700238 if (arm)
239 val |= 1 << DB_CQ_REARM_SHIFT;
240 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000241 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242}
243
/* ndo_set_mac_address handler.
 * Programs the new MAC via PMAC_ADD, deletes the previously programmed
 * entry, then confirms with the FW which MAC is actually active before
 * committing the change to netdev->dev_addr.  The query step matters on
 * VFs, where the add may legitimately fail yet the PF may have
 * pre-provisioned the same address.
 * Returns 0 on success, -EADDRNOTAVAIL for a malformed address, -EPERM if
 * the FW did not activate the requested MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
304
Sathya Perlaca34fe32012-11-06 17:48:56 +0000305/* BE2 supports only v0 cmd */
306static void *hw_stats_from_cmd(struct be_adapter *adapter)
307{
308 if (BE2_chip(adapter)) {
309 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500312 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000313 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else {
317 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000320 }
321}
322
323/* BE2 supports only v0 cmd */
324static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
325{
326 if (BE2_chip(adapter)) {
327 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
328
329 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000331 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else {
335 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000338 }
339}
340
/* Copy the v0 (BE2) FW stats response into the driver's chip-agnostic
 * drv_stats structure, after byte-swapping the response in place.
 * v0 has no per-port jabber counter, so it is picked from the rxf block
 * based on the port number.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert once before reading fields */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filter drops separately; fold both */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
389
/* Copy the v1 (BE3) FW stats response into the driver's chip-agnostic
 * drv_stats structure, after byte-swapping the response in place.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert once before reading fields */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
435
/* Copy the v2 FW stats response into the driver's chip-agnostic drv_stats
 * structure, after byte-swapping the response in place.  v2 additionally
 * carries RoCE counters, copied only when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert once before reading fields */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
489
/* Copy the Lancer per-port (pport) stats response into the driver's
 * chip-agnostic drv_stats structure, after byte-swapping it in place.
 * The "_lo" fields are the low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* response is little-endian; convert once before reading fields */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single fifo-overflow counter; it feeds both the
	 * input-fifo and rxpp drop stats below.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filter drops are reported separately; fold both */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000528
Sathya Perla09c1c682011-08-22 19:41:53 +0000529static void accumulate_16bit_val(u32 *acc, u16 val)
530{
531#define lo(x) (x & 0xFFFF)
532#define hi(x) (x & 0xFFFF0000)
533 bool wrapped = val < lo(*acc);
534 u32 newacc = hi(*acc) + val;
535
536 if (wrapped)
537 newacc += 65536;
538 ACCESS_ONCE(*acc) = newacc;
539}
540
Jingoo Han4188e7d2013-08-05 18:02:02 +0900541static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000542 struct be_rx_obj *rxo,
543 u32 erx_stat)
544{
545 if (!BEx_chip(adapter))
546 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
547 else
548 /* below erx HW counter can actually wrap around after
549 * 65535. Driver accumulates a 32-bit value
550 */
551 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
552 (u16)erx_stat);
553}
554
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000555void be_parse_stats(struct be_adapter *adapter)
556{
Ajit Khaparde61000862013-10-03 16:16:33 -0500557 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558 struct be_rx_obj *rxo;
559 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000560 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000561
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (lancer_chip(adapter)) {
563 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000564 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000565 if (BE2_chip(adapter))
566 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else if (BE3_chip(adapter))
568 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000569 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 else
571 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000572
Ajit Khaparde61000862013-10-03 16:16:33 -0500573 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000575 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
576 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000578 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000579}
580
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the parsed
 * FW counters (drv_stats) into @stats.  The per-queue 64-bit counters are
 * read under u64_stats seqcount retry loops so a concurrent datapath
 * update cannot yield a torn value on 32-bit hosts.
 * Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot protocol for the TX counters */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
646
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000647void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700649 struct net_device *netdev = adapter->netdev;
650
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000651 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000652 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000653 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700654 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000655
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530656 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000657 netif_carrier_on(netdev);
658 else
659 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700660}
661
Sathya Perla3c8def92011-06-12 20:01:58 +0000662static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000663 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700664{
Sathya Perla3c8def92011-06-12 20:01:58 +0000665 struct be_tx_stats *stats = tx_stats(txo);
666
Sathya Perlaab1594e2011-07-25 19:10:15 +0000667 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000668 stats->tx_reqs++;
669 stats->tx_wrbs += wrb_cnt;
670 stats->tx_bytes += copied;
671 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700672 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000673 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000674 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700675}
676
677/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000678static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
679 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700681 int cnt = (skb->len > skb->data_len);
682
683 cnt += skb_shinfo(skb)->nr_frags;
684
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685 /* to account for hdr wrb */
686 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000687 if (lancer_chip(adapter) || !(cnt & 1)) {
688 *dummy = false;
689 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690 /* add a dummy to make it an even num */
691 cnt++;
692 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000693 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700694 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
695 return cnt;
696}
697
698static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
699{
700 wrb->frag_pa_hi = upper_32_bits(addr);
701 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
702 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000703 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700704}
705
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
707 struct sk_buff *skb)
708{
709 u8 vlan_prio;
710 u16 vlan_tag;
711
712 vlan_tag = vlan_tx_tag_get(skb);
713 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
714 /* If vlan priority provided by OS is NOT in available bmap */
715 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
716 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
717 adapter->recommended_prio;
718
719 return vlan_tag;
720}
721
Sathya Perlac9c47142014-03-27 10:46:19 +0530722/* Used only for IP tunnel packets */
723static u16 skb_inner_ip_proto(struct sk_buff *skb)
724{
725 return (inner_ip_hdr(skb)->version == 4) ?
726 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
727}
728
729static u16 skb_ip_proto(struct sk_buff *skb)
730{
731 return (ip_hdr(skb)->version == 4) ?
732 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
733}
734
/* Populate the Tx header WRB for @skb.
 * Programs LSO/checksum-offload, VLAN insertion, event/completion and
 * length fields according to the skb's offload requirements.
 * @wrb_cnt: total number of WRBs (including this header) for the pkt
 * @len:     total payload bytes mapped into the data WRBs
 * @skip_hw_vlan: when true, tell HW to skip VLAN tagging (evt=1, compl=0)
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 not set on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunnelled pkt: request inner-IP csum and use the
			 * inner L4 protocol to pick TCP/UDP csum offload
			 */
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
775
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000776static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000777 bool unmap_single)
778{
779 dma_addr_t dma;
780
781 be_dws_le_to_cpu(wrb, sizeof(*wrb));
782
783 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000784 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000785 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000786 dma_unmap_single(dev, dma, wrb->frag_len,
787 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000788 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000789 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000790 }
791}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700792
/* DMA-map the skb's linear data and page frags and fill the Tx queue
 * with one header WRB followed by one data WRB per mapped buffer (plus
 * an optional dummy WRB to make the total even).
 * Returns the number of payload bytes mapped, or 0 on DMA-mapping
 * failure, in which case every mapping made so far is undone and the
 * queue head is rewound to where it was on entry.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB; filled in at the end */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot; rewind point on error */

	if (skb->len > skb->data_len) {
		/* map the linear part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one data WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB to pad the WRB count to an even number */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unmap everything mapped so far, walking the data WRBs from
	 * map_head, and leave the queue head rewound
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB was dma_map_single'd */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
859
/* Insert the VLAN tag(s) directly into the pkt data instead of relying
 * on HW tagging; used as a workaround for pkts/configs the HW cannot
 * tag correctly. In QnQ mode the pvid becomes the inner tag and
 * qnq_vid the outer tag.
 * Returns the (possibly reallocated) skb, or NULL if tag insertion
 * failed; may set *skip_hw_vlan to tell the caller to disable HW
 * tagging for this pkt.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* pkt data is about to be modified; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;	/* tag now lives in the pkt data */
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
902
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000903static bool be_ipv6_exthdr_check(struct sk_buff *skb)
904{
905 struct ethhdr *eh = (struct ethhdr *)skb->data;
906 u16 offset = ETH_HLEN;
907
908 if (eh->h_proto == htons(ETH_P_IPV6)) {
909 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
910
911 offset += sizeof(struct ipv6hdr);
912 if (ip6h->nexthdr != NEXTHDR_TCP &&
913 ip6h->nexthdr != NEXTHDR_UDP) {
914 struct ipv6_opt_hdr *ehdr =
915 (struct ipv6_opt_hdr *) (skb->data + offset);
916
917 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
918 if (ehdr->hdrlen == 0xff)
919 return true;
920 }
921 }
922 return false;
923}
924
925static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
926{
927 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
928}
929
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
931 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000932{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000933 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000934}
935
/* Apply HW-bug workarounds needed on BEx/Lancer before xmit:
 * - trim padded short pkts to the IP-header length so HW/Lancer does
 *   not mangle tot_len or compute a bad checksum
 * - skip HW tagging when the pkt already carries an inline VLAN hdr in
 *   pvid-tagging mode
 * - insert VLAN tags in SW (or drop the pkt) where HW tagging would
 *   miscompute the csum or lock up the ASIC
 * Returns the (possibly modified) skb, or NULL if the pkt was consumed
 * (dropped or tag-insertion failed).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the padding; keep only hdr + IP tot_len bytes */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1003
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301004static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1005 struct sk_buff *skb,
1006 bool *skip_hw_vlan)
1007{
1008 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1009 * less may cause a transmit stall on that port. So the work-around is
1010 * to pad short packets (<= 32 bytes) to a 36-byte length.
1011 */
1012 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1013 if (skb_padto(skb, 36))
1014 return NULL;
1015 skb->len = 36;
1016 }
1017
1018 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1019 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1020 if (!skb)
1021 return NULL;
1022 }
1023
1024 return skb;
1025}
1026
/* ndo_start_xmit handler: apply chip workarounds, map the pkt into the
 * Tx queue, ring the doorbell and update Tx stats. Stops the subqueue
 * when there is no longer room for another maximally-fragmented pkt.
 * Always returns NETDEV_TX_OK (drops are counted, never requeued).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* header-WRB slot; rewind point on failure */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* pkt consumed by the workaround code */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed in make_tx_wrbs(); rewind and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1075
1076static int be_change_mtu(struct net_device *netdev, int new_mtu)
1077{
1078 struct be_adapter *adapter = netdev_priv(netdev);
1079 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001080 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1081 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 dev_info(&adapter->pdev->dev,
1083 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001084 BE_MIN_MTU,
1085 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 return -EINVAL;
1087 }
1088 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1089 netdev->mtu, new_mtu);
1090 netdev->mtu = new_mtu;
1091 return 0;
1092}
1093
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* Program the HW VLAN filter table from adapter->vlan_tag[].
 * Falls back to VLAN-promiscuous mode when more vids are configured
 * than the HW supports, or when the firmware reports insufficient
 * resources; re-enables HW filtering when it succeeds again.
 * Returns 0 on success or a firmware status code.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					"Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode; nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1153
Patrick McHardy80d5c362013-04-19 02:04:28 +00001154static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001155{
1156 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001157 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001158
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001159 /* Packets with VID 0 are always received by Lancer by default */
1160 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301161 return status;
1162
1163 if (adapter->vlan_tag[vid])
1164 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001165
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001166 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301167 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001168
Somnath Kotura6b74e02014-01-21 15:50:55 +05301169 status = be_vid_config(adapter);
1170 if (status) {
1171 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001172 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301173 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301174
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001175 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176}
1177
Patrick McHardy80d5c362013-04-19 02:04:28 +00001178static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001179{
1180 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001181 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001183 /* Packets with VID 0 are always received by Lancer by default */
1184 if (lancer_chip(adapter) && vid == 0)
1185 goto ret;
1186
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001187 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301188 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001189 if (!status)
1190 adapter->vlans_added--;
1191 else
1192 adapter->vlan_tag[vid] = 1;
1193ret:
1194 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001195}
1196
Somnath kotur7ad09452014-03-03 14:24:43 +05301197static void be_clear_promisc(struct be_adapter *adapter)
1198{
1199 adapter->promiscuous = false;
1200 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1201
1202 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1203}
1204
/* ndo_set_rx_mode handler: program the HW Rx filters to match the
 * netdev's promisc/allmulti flags and its unicast/multicast address
 * lists, falling back to promiscuous modes when the HW filter
 * capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* re-sync the UC MAC filter list when it has changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all previously-programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC MACs for the HW: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1264
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001265static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1266{
1267 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001268 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001269 int status;
1270
Sathya Perla11ac75e2011-12-13 00:58:50 +00001271 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001272 return -EPERM;
1273
Sathya Perla11ac75e2011-12-13 00:58:50 +00001274 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001275 return -EINVAL;
1276
Sathya Perla3175d8c2013-07-23 15:25:03 +05301277 if (BEx_chip(adapter)) {
1278 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1279 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001280
Sathya Perla11ac75e2011-12-13 00:58:50 +00001281 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1282 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301283 } else {
1284 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1285 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001286 }
1287
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001288 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001289 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1290 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001291 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001292 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001293
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001294 return status;
1295}
1296
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001297static int be_get_vf_config(struct net_device *netdev, int vf,
1298 struct ifla_vf_info *vi)
1299{
1300 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001301 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001302
Sathya Perla11ac75e2011-12-13 00:58:50 +00001303 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001304 return -EPERM;
1305
Sathya Perla11ac75e2011-12-13 00:58:50 +00001306 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001307 return -EINVAL;
1308
1309 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001310 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001311 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1312 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001313 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301314 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001315
1316 return 0;
1317}
1318
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001319static int be_set_vf_vlan(struct net_device *netdev,
1320 int vf, u16 vlan, u8 qos)
1321{
1322 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001323 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001324 int status = 0;
1325
Sathya Perla11ac75e2011-12-13 00:58:50 +00001326 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001327 return -EPERM;
1328
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001329 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001330 return -EINVAL;
1331
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001332 if (vlan || qos) {
1333 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301334 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001335 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1336 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001337 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001338 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301339 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1340 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001341 }
1342
Somnath Koturc5022242014-03-03 14:24:20 +05301343 if (!status)
1344 vf_cfg->vlan_tag = vlan;
1345 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001346 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301347 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001348 return status;
1349}
1350
Ajit Khapardee1d18732010-07-23 01:52:13 +00001351static int be_set_vf_tx_rate(struct net_device *netdev,
1352 int vf, int rate)
1353{
1354 struct be_adapter *adapter = netdev_priv(netdev);
1355 int status = 0;
1356
Sathya Perla11ac75e2011-12-13 00:58:50 +00001357 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001358 return -EPERM;
1359
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001360 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001361 return -EINVAL;
1362
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001363 if (rate < 100 || rate > 10000) {
1364 dev_err(&adapter->pdev->dev,
1365 "tx rate must be between 100 and 10000 Mbps\n");
1366 return -EINVAL;
1367 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001368
Sathya Perlaa4018012014-03-27 10:46:18 +05301369 status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001370 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001371 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001372 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001373 else
1374 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001375 return status;
1376}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301377static int be_set_vf_link_state(struct net_device *netdev, int vf,
1378 int link_state)
1379{
1380 struct be_adapter *adapter = netdev_priv(netdev);
1381 int status;
1382
1383 if (!sriov_enabled(adapter))
1384 return -EPERM;
1385
1386 if (vf >= adapter->num_vfs)
1387 return -EINVAL;
1388
1389 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1390 if (!status)
1391 adapter->vf_cfg[vf].plink_tracking = link_state;
1392
1393 return status;
1394}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001395
Sathya Perla2632baf2013-10-01 16:00:00 +05301396static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1397 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001398{
Sathya Perla2632baf2013-10-01 16:00:00 +05301399 aic->rx_pkts_prev = rx_pkts;
1400 aic->tx_reqs_prev = tx_pkts;
1401 aic->jiffies = now;
1402}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001403
/* Adaptive interrupt coalescing: recompute the event-queue interrupt
 * delay (eqd) for every EQ from the rx/tx packet rate observed since
 * the previous run, and push all changed values to the FW in a single
 * be_cmd_modify_eqd() call.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled for this EQ: fall back to the
			 * configured static delay (et_eqd) and reset the
			 * rate-tracking timestamp.
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Snapshot per-queue pkt counters under the u64 stats
		 * seqcount retry loops (counters are updated from the
		 * datapath).
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second since the last run,
		 * mapped to a delay value.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* low rate: no interrupt delay */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		/* Save this run's counters as the next run's baseline */
		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Batch only EQs whose delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1470
Sathya Perla3abcded2010-10-03 22:12:27 -07001471static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001472 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001473{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001474 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001475
Sathya Perlaab1594e2011-07-25 19:10:15 +00001476 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001477 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001478 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001479 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001480 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001481 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001482 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001483 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001484 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485}
1486
Sathya Perla2e588f82011-03-11 02:49:26 +00001487static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001488{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001489 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301490 * Also ignore ipcksm for ipv6 pkts
1491 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001492 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301493 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001494}
1495
/* Consume the RX frag at the queue tail: tear down or cpu-sync its DMA
 * mapping as appropriate, advance the tail, and return its page_info
 * entry. Callers own the page reference held in the returned entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag carved from this page: release the whole
		 * big-page DMA mapping.
		 */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still has frags in flight: only make this frag's
		 * bytes visible to the CPU.
		 */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1521
1522/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001523static void be_rx_compl_discard(struct be_rx_obj *rxo,
1524 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001527 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001529 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301530 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001531 put_page(page_info->page);
1532 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001533 }
1534}
1535
/*
 * skb_fill_rx_data forms a complete skb for the ether frame indicated
 * by rxcp: the first BE_HDR_LEN-or-fewer bytes are copied into the skb
 * linear area, and any remaining data is attached as page frags, with
 * consecutive frags from the same physical page coalesced into a
 * single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the ethernet header inline; the rest of the
		 * first frag stays in the page and becomes frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or freed) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag[j]: drop the extra page ref
			 * taken when the frag was posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1609
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the frame data, set checksum/hash/vlan
 * metadata and hand the skb to the network stack. On skb allocation
 * failure the completion's frags are discarded and a drop is counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when RXCSUM is enabled and the
	 * completion flags say it is reliable (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1645
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the completion's frags directly to a napi-provided skb
 * (coalescing frags from the same physical page into one slot), fill
 * in the metadata and feed the skb into the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame's frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frag[j]: drop the extra page ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1703
/* Unpack a v1 RX completion descriptor (already converted to CPU
 * byte-order by the caller, be_rx_compl_get()) into the per-ring rxcp
 * struct. Used when adapter->be3_native is set. The vlan fields are
 * extracted only when the vtp (vlanf) bit is present.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* v1 completions additionally report tunneled (encapsulated) pkts */
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001735
/* Unpack a v0 RX completion descriptor (already converted to CPU
 * byte-order by the caller, be_rx_compl_get()) into the per-ring rxcp
 * struct. Used when adapter->be3_native is not set. Unlike v1, this
 * format reports the ip_frag bit instead of tunneling info.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		/* vlan fields are only valid when the vtp bit is set */
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1767
/* Fetch the next RX completion from this ring's completion queue.
 * Returns NULL if the descriptor at the CQ tail is not yet valid;
 * otherwise parses it into rxo->rxcp, applies sw vlan fixups, marks the
 * descriptor invalid for reuse, advances the CQ tail and returns rxcp.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit has
	 * been observed (DMA-written by the adapter).
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* Descriptor layout depends on the adapter's operating mode */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum verdict is not usable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag when it matches the port-vid and that vid
		 * is not configured in the adapter's vlan table.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1812
Eric Dumazet1829b082011-03-01 05:48:12 +00001813static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001815 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001816
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001817 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001818 gfp |= __GFP_COMP;
1819 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820}
1821
1822/*
1823 * Allocate a page, split it to fragments of size rx_frag_size and post as
1824 * receive buffers to BE
1825 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001826static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001827{
Sathya Perla3abcded2010-10-03 22:12:27 -07001828 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001829 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001830 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01001832 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001833 struct be_eth_rx_d *rxd;
1834 u64 page_dmaaddr = 0, frag_dmaaddr;
1835 u32 posted, page_offset = 0;
1836
Sathya Perla3abcded2010-10-03 22:12:27 -07001837 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1839 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001840 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001841 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001842 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001843 break;
1844 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01001845 page_dmaaddr = dma_map_page(dev, pagep, 0,
1846 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001847 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01001848 if (dma_mapping_error(dev, page_dmaaddr)) {
1849 put_page(pagep);
1850 pagep = NULL;
1851 rx_stats(rxo)->rx_post_fail++;
1852 break;
1853 }
Sathya Perlae50287b2014-03-04 12:14:38 +05301854 page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855 } else {
1856 get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05301857 page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858 }
Sathya Perlae50287b2014-03-04 12:14:38 +05301859 page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860 page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861
1862 rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05301863 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1865 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866
1867 /* Any space left in the current big page for another frag? */
1868 if ((page_offset + rx_frag_size + rx_frag_size) >
1869 adapter->big_page_size) {
1870 pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05301871 page_info->last_frag = true;
1872 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1873 } else {
1874 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001875 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001876
1877 prev_page_info = page_info;
1878 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001879 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001880 }
Sathya Perlae50287b2014-03-04 12:14:38 +05301881
1882 /* Mark the last frag of a page when we break out of the above loop
1883 * with no more slots available in the RXQ
1884 */
1885 if (pagep) {
1886 prev_page_info->last_frag = true;
1887 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1888 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889
1890 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001891 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301892 if (rxo->rx_post_starved)
1893 rxo->rx_post_starved = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001894 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001895 } else if (atomic_read(&rxq->used) == 0) {
1896 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001897 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001899}
1900
Sathya Perla5fb379e2009-06-18 00:02:59 +00001901static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1904
1905 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1906 return NULL;
1907
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001908 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1910
1911 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1912
1913 queue_tail_inc(tx_cq);
1914 return txcp;
1915}
1916
Sathya Perla3c8def92011-06-12 20:01:58 +00001917static u16 be_tx_compl_process(struct be_adapter *adapter,
1918 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919{
Sathya Perla3c8def92011-06-12 20:01:58 +00001920 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001921 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001922 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001923 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001924 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1925 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001926
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001927 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001929 sent_skbs[txq->tail] = NULL;
1930
1931 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001932 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001933
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001934 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001935 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001936 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001937 unmap_tx_frag(&adapter->pdev->dev, wrb,
1938 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001939 unmap_skb_hdr = false;
1940
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001941 num_wrbs++;
1942 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001943 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001944
Eric W. Biedermand8ec2c02014-03-11 14:19:50 -07001945 dev_kfree_skb_any(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001946 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947}
1948
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001949/* Return the number of events in the event queue */
1950static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001951{
1952 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001953 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001954
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001955 do {
1956 eqe = queue_tail_node(&eqo->q);
1957 if (eqe->evt == 0)
1958 break;
1959
1960 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001961 eqe->evt = 0;
1962 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001963 queue_tail_inc(&eqo->q);
1964 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001965
1966 return num;
1967}
1968
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001969/* Leaves the EQ is disarmed state */
1970static void be_eq_clean(struct be_eq_obj *eqo)
1971{
1972 int num = events_get(eqo);
1973
1974 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1975}
1976
/* Drain the RX completion queue and release all posted RX buffers.
 * Called on queue teardown; leaves the CQ unarmed and the RXQ empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is already in
			 * an error state (no flush compl will ever come).
			 */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2025
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002026static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002027{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002028 struct be_tx_obj *txo;
2029 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002030 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002031 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00002032 struct sk_buff *sent_skb;
2033 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002034 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302036 /* Stop polling for compls when HW has been silent for 10ms */
Sathya Perlaa8e91792009-08-10 03:42:43 +00002037 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002038 pending_txqs = adapter->num_tx_qs;
2039
2040 for_all_tx_queues(adapter, txo, i) {
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302041 cmpl = 0;
2042 num_wrbs = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002043 txq = &txo->q;
2044 while ((txcp = be_tx_compl_get(&txo->cq))) {
2045 end_idx =
2046 AMAP_GET_BITS(struct amap_eth_tx_compl,
2047 wrb_index, txcp);
2048 num_wrbs += be_tx_compl_process(adapter, txo,
2049 end_idx);
2050 cmpl++;
2051 }
2052 if (cmpl) {
2053 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2054 atomic_sub(num_wrbs, &txq->used);
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302055 timeo = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002056 }
2057 if (atomic_read(&txq->used) == 0)
2058 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002059 }
2060
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302061 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
Sathya Perlaa8e91792009-08-10 03:42:43 +00002062 break;
2063
2064 mdelay(1);
2065 } while (true);
2066
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002067 for_all_tx_queues(adapter, txo, i) {
2068 txq = &txo->q;
2069 if (atomic_read(&txq->used))
2070 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2071 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00002072
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002073 /* free posted tx for which compls will never arrive */
2074 while (atomic_read(&txq->used)) {
2075 sent_skb = txo->sent_skb_list[txq->tail];
2076 end_idx = txq->tail;
2077 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2078 &dummy_wrb);
2079 index_adv(&end_idx, num_wrbs - 1, txq->len);
2080 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2081 atomic_sub(num_wrbs, &txq->used);
2082 }
Sathya Perlab03388d2010-02-18 00:37:17 +00002083 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002084}
2085
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002086static void be_evt_queues_destroy(struct be_adapter *adapter)
2087{
2088 struct be_eq_obj *eqo;
2089 int i;
2090
2091 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002092 if (eqo->q.created) {
2093 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002094 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302095 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302096 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002097 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002098 be_queue_free(adapter, &eqo->q);
2099 }
2100}
2101
/* Create the event queues (one per MSI-x vector in use, capped by the
 * configured number of queues), register NAPI for each, and initialize
 * per-EQ adaptive interrupt coalescing state.
 * Returns 0 or a negative errno; on failure, partially created queues are
 * cleaned up by the caller (via be_evt_queues_destroy).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2135
Sathya Perla5fb379e2009-06-18 00:02:59 +00002136static void be_mcc_queues_destroy(struct be_adapter *adapter)
2137{
2138 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002139
Sathya Perla8788fdc2009-07-27 22:52:03 +00002140 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002141 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002142 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002143 be_queue_free(adapter, q);
2144
Sathya Perla8788fdc2009-07-27 22:52:03 +00002145 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002146 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002147 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002148 be_queue_free(adapter, q);
2149}
2150
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and then the MCC queue on top of it.
 * Uses goto-chained unwinding so each failure path releases exactly what
 * was acquired before it. Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2183
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002184static void be_tx_queues_destroy(struct be_adapter *adapter)
2185{
2186 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002187 struct be_tx_obj *txo;
2188 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189
Sathya Perla3c8def92011-06-12 20:01:58 +00002190 for_all_tx_queues(adapter, txo, i) {
2191 q = &txo->q;
2192 if (q->created)
2193 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2194 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002195
Sathya Perla3c8def92011-06-12 20:01:58 +00002196 q = &txo->cq;
2197 if (q->created)
2198 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2199 be_queue_free(adapter, q);
2200 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201}
2202
Sathya Perla77071332013-08-27 16:57:34 +05302203static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002204{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002205 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002206 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302207 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002208
Sathya Perla92bf14a2013-08-27 16:57:32 +05302209 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002210
Sathya Perla3c8def92011-06-12 20:01:58 +00002211 for_all_tx_queues(adapter, txo, i) {
2212 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002213 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2214 sizeof(struct be_eth_tx_compl));
2215 if (status)
2216 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002217
John Stultz827da442013-10-07 15:51:58 -07002218 u64_stats_init(&txo->stats.sync);
2219 u64_stats_init(&txo->stats.sync_compl);
2220
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002221 /* If num_evt_qs is less than num_tx_qs, then more than
2222 * one txq share an eq
2223 */
2224 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2225 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2226 if (status)
2227 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002229 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2230 sizeof(struct be_eth_wrb));
2231 if (status)
2232 return status;
2233
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002234 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002235 if (status)
2236 return status;
2237 }
2238
Sathya Perlad3791422012-09-28 04:39:44 +00002239 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2240 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002241 return 0;
2242}
2243
2244static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002245{
2246 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002247 struct be_rx_obj *rxo;
2248 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249
Sathya Perla3abcded2010-10-03 22:12:27 -07002250 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002251 q = &rxo->cq;
2252 if (q->created)
2253 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2254 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256}
2257
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002258static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002259{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002260 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002261 struct be_rx_obj *rxo;
2262 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002263
Sathya Perla92bf14a2013-08-27 16:57:32 +05302264 /* We can create as many RSS rings as there are EQs. */
2265 adapter->num_rx_qs = adapter->num_evt_qs;
2266
2267 /* We'll use RSS only if atleast 2 RSS rings are supported.
2268 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002269 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302270 if (adapter->num_rx_qs > 1)
2271 adapter->num_rx_qs++;
2272
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002274 for_all_rx_queues(adapter, rxo, i) {
2275 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002276 cq = &rxo->cq;
2277 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2278 sizeof(struct be_eth_rx_compl));
2279 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002280 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281
John Stultz827da442013-10-07 15:51:58 -07002282 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002283 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2284 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002285 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002286 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002287 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288
Sathya Perlad3791422012-09-28 04:39:44 +00002289 dev_info(&adapter->pdev->dev,
2290 "created %d RSS queue(s) and 1 default RX queue\n",
2291 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002293}
2294
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295static irqreturn_t be_intx(int irq, void *dev)
2296{
Sathya Perlae49cc342012-11-27 19:50:02 +00002297 struct be_eq_obj *eqo = dev;
2298 struct be_adapter *adapter = eqo->adapter;
2299 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002300
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002301 /* IRQ is not expected when NAPI is scheduled as the EQ
2302 * will not be armed.
2303 * But, this can happen on Lancer INTx where it takes
2304 * a while to de-assert INTx or in BE2 where occasionaly
2305 * an interrupt may be raised even when EQ is unarmed.
2306 * If NAPI is already scheduled, then counting & notifying
2307 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002308 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002309 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002310 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002311 __napi_schedule(&eqo->napi);
2312 if (num_evts)
2313 eqo->spurious_intr = 0;
2314 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002315 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002316
2317 /* Return IRQ_HANDLED only for the the first spurious intr
2318 * after a valid intr to stop the kernel from branding
2319 * this irq as a bad one!
2320 */
2321 if (num_evts || eqo->spurious_intr++ == 0)
2322 return IRQ_HANDLED;
2323 else
2324 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002325}
2326
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002327static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002328{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002329 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002330
Sathya Perla0b545a62012-11-23 00:27:18 +00002331 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2332 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002333 return IRQ_HANDLED;
2334}
2335
Sathya Perla2e588f82011-03-11 02:49:26 +00002336static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002337{
Somnath Koture38b1702013-05-29 22:55:56 +00002338 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002339}
2340
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002341static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla6384a4d2013-10-25 10:40:16 +05302342 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002343{
Sathya Perla3abcded2010-10-03 22:12:27 -07002344 struct be_adapter *adapter = rxo->adapter;
2345 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002346 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002347 u32 work_done;
2348
2349 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002350 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351 if (!rxcp)
2352 break;
2353
Sathya Perla12004ae2011-08-02 19:57:46 +00002354 /* Is it a flush compl that has no data */
2355 if (unlikely(rxcp->num_rcvd == 0))
2356 goto loop_continue;
2357
2358 /* Discard compl with partial DMA Lancer B0 */
2359 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002360 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002361 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002362 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002363
Sathya Perla12004ae2011-08-02 19:57:46 +00002364 /* On BE drop pkts that arrive due to imperfect filtering in
2365 * promiscuous mode on some skews
2366 */
2367 if (unlikely(rxcp->port != adapter->port_num &&
2368 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002369 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002370 goto loop_continue;
2371 }
2372
Sathya Perla6384a4d2013-10-25 10:40:16 +05302373 /* Don't do gro when we're busy_polling */
2374 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002375 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002376 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05302377 be_rx_compl_process(rxo, napi, rxcp);
2378
Sathya Perla12004ae2011-08-02 19:57:46 +00002379loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002380 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002381 }
2382
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002383 if (work_done) {
2384 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002385
Sathya Perla6384a4d2013-10-25 10:40:16 +05302386 /* When an rx-obj gets into post_starved state, just
2387 * let be_worker do the posting.
2388 */
2389 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2390 !rxo->rx_post_starved)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002391 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002392 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002393
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002394 return work_done;
2395}
2396
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002397static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2398 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002399{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002400 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002401 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002402
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002403 for (work_done = 0; work_done < budget; work_done++) {
2404 txcp = be_tx_compl_get(&txo->cq);
2405 if (!txcp)
2406 break;
2407 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002408 AMAP_GET_BITS(struct amap_eth_tx_compl,
2409 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002410 }
2411
2412 if (work_done) {
2413 be_cq_notify(adapter, txo->cq.id, true, work_done);
2414 atomic_sub(num_wrbs, &txo->q.used);
2415
2416 /* As Tx wrbs have been freed up, wake up netdev queue
2417 * if it was stopped due to lack of tx wrbs. */
2418 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2419 atomic_read(&txo->q.used) < txo->q.len / 2) {
2420 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002421 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002422
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002423 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2424 tx_stats(txo)->tx_compl += work_done;
2425 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2426 }
2427 return (work_done < budget); /* Done */
2428}
Sathya Perla3c8def92011-06-12 20:01:58 +00002429
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302430int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002431{
2432 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2433 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002434 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302435 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002436 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002437
Sathya Perla0b545a62012-11-23 00:27:18 +00002438 num_evts = events_get(eqo);
2439
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002440 /* Process all TXQs serviced by this EQ */
2441 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2442 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2443 eqo->tx_budget, i);
2444 if (!tx_done)
2445 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002446 }
2447
Sathya Perla6384a4d2013-10-25 10:40:16 +05302448 if (be_lock_napi(eqo)) {
2449 /* This loop will iterate twice for EQ0 in which
2450 * completions of the last RXQ (default one) are also processed
2451 * For other EQs the loop iterates only once
2452 */
2453 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2454 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2455 max_work = max(work, max_work);
2456 }
2457 be_unlock_napi(eqo);
2458 } else {
2459 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002460 }
2461
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002462 if (is_mcc_eqo(eqo))
2463 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002464
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002465 if (max_work < budget) {
2466 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002467 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002468 } else {
2469 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002470 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002471 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002472 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002473}
2474
Sathya Perla6384a4d2013-10-25 10:40:16 +05302475#ifdef CONFIG_NET_RX_BUSY_POLL
2476static int be_busy_poll(struct napi_struct *napi)
2477{
2478 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2479 struct be_adapter *adapter = eqo->adapter;
2480 struct be_rx_obj *rxo;
2481 int i, work = 0;
2482
2483 if (!be_lock_busy_poll(eqo))
2484 return LL_FLUSH_BUSY;
2485
2486 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2487 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2488 if (work)
2489 break;
2490 }
2491
2492 be_unlock_busy_poll(eqo);
2493 return work;
2494}
2495#endif
2496
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002497void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002498{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002499 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2500 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002501 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302502 bool error_detected = false;
2503 struct device *dev = &adapter->pdev->dev;
2504 struct net_device *netdev = adapter->netdev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002505
Sathya Perlad23e9462012-12-17 19:38:51 +00002506 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002507 return;
2508
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002509 if (lancer_chip(adapter)) {
2510 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2511 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2512 sliport_err1 = ioread32(adapter->db +
2513 SLIPORT_ERROR1_OFFSET);
2514 sliport_err2 = ioread32(adapter->db +
2515 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302516 adapter->hw_error = true;
2517 /* Do not log error messages if its a FW reset */
2518 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2519 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2520 dev_info(dev, "Firmware update in progress\n");
2521 } else {
2522 error_detected = true;
2523 dev_err(dev, "Error detected in the card\n");
2524 dev_err(dev, "ERR: sliport status 0x%x\n",
2525 sliport_status);
2526 dev_err(dev, "ERR: sliport error1 0x%x\n",
2527 sliport_err1);
2528 dev_err(dev, "ERR: sliport error2 0x%x\n",
2529 sliport_err2);
2530 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002531 }
2532 } else {
2533 pci_read_config_dword(adapter->pdev,
2534 PCICFG_UE_STATUS_LOW, &ue_lo);
2535 pci_read_config_dword(adapter->pdev,
2536 PCICFG_UE_STATUS_HIGH, &ue_hi);
2537 pci_read_config_dword(adapter->pdev,
2538 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2539 pci_read_config_dword(adapter->pdev,
2540 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002541
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002542 ue_lo = (ue_lo & ~ue_lo_mask);
2543 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002544
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302545 /* On certain platforms BE hardware can indicate spurious UEs.
2546 * Allow HW to stop working completely in case of a real UE.
2547 * Hence not setting the hw_error for UE detection.
2548 */
2549
2550 if (ue_lo || ue_hi) {
2551 error_detected = true;
2552 dev_err(dev,
2553 "Unrecoverable Error detected in the adapter");
2554 dev_err(dev, "Please reboot server to recover");
2555 if (skyhawk_chip(adapter))
2556 adapter->hw_error = true;
2557 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2558 if (ue_lo & 1)
2559 dev_err(dev, "UE: %s bit set\n",
2560 ue_status_low_desc[i]);
2561 }
2562 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2563 if (ue_hi & 1)
2564 dev_err(dev, "UE: %s bit set\n",
2565 ue_status_hi_desc[i]);
2566 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05302567 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002568 }
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302569 if (error_detected)
2570 netif_carrier_off(netdev);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002571}
2572
Sathya Perla8d56ff12009-11-22 22:02:26 +00002573static void be_msix_disable(struct be_adapter *adapter)
2574{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002575 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002576 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002577 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302578 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002579 }
2580}
2581
/* Enable MSI-x and allocate interrupt vectors for the NIC (and for RoCE
 * when the function supports it).
 * Returns 0 on success. On failure: returns 0 for a PF (so probe can fall
 * back to INTx) but the pci_enable_msix_range() error for a VF, because
 * VFs cannot use INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested (but not fewer than
	 * MIN_MSIX_VECTORS); a negative return means total failure.
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give half of the granted vectors to RoCE, remainder to NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2625
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002626static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002627 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002628{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302629 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002630}
2631
/* Request an IRQ for every event queue's MSI-x vector.
 * On failure, frees the IRQs that were already requested and disables
 * MSI-x entirely; the caller may then fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* IRQ name visible in /proc/interrupts: "<ifname>-q<N>" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the IRQs that were successfully requested (0..i-1) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2655
/* Register interrupt handlers: MSI-x when enabled, otherwise shared INTx.
 * VFs must use MSI-x, so a VF returns the MSI-x failure instead of
 * falling back to INTx.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2683
/* Free the IRQ(s) acquired by be_irq_register(); no-op when none were
 * registered. Mirrors the MSI-x vs INTx choice made at registration time.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		/* INTx was registered against the first EQ only */
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2706
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002707static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002708{
2709 struct be_queue_info *q;
2710 struct be_rx_obj *rxo;
2711 int i;
2712
2713 for_all_rx_queues(adapter, rxo, i) {
2714 q = &rxo->q;
2715 if (q->created) {
2716 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002717 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002718 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002719 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002720 }
2721}
2722
/* ndo_stop handler: quiesce the interface.
 * Teardown order: stop RoCE, disable NAPI/busy-poll, stop async MCC
 * events, drain TX, destroy RX queues, delete extra unicast MAC filters,
 * quiesce/clean event queues and finally release the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the additional unicast MACs; index 0 is the primary MAC
	 * and is left in place.
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no handler is still running before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2772
/* Allocate and create all RX queues in the FW, program the RSS
 * indirection table and hash key (when multiple RX queues exist), and
 * post the initial receive buffers.
 * Returns 0 on success or the first FW/alloc error encountered.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queues round-robin until all entries are populated.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
			j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on newer (non-BEx) chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Program a random hash key into the FW */
	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable,
			       rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2839
/* ndo_open handler: bring the interface up.
 * Creates RX queues, registers IRQs, arms the completion/event queues,
 * enables NAPI and async MCC events, refreshes link state and starts the
 * TX queues. Any failure unwinds through be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Enable completion notifications on all RX and TX CQs */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		/* Arm the EQ so interrupts start flowing */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn any VxLAN ports already configured in the stack */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2889
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002890static int be_setup_wol(struct be_adapter *adapter, bool enable)
2891{
2892 struct be_dma_mem cmd;
2893 int status = 0;
2894 u8 mac[ETH_ALEN];
2895
2896 memset(mac, 0, ETH_ALEN);
2897
2898 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002899 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2900 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002901 if (cmd.va == NULL)
2902 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002903
2904 if (enable) {
2905 status = pci_write_config_dword(adapter->pdev,
2906 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2907 if (status) {
2908 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002909 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002910 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2911 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002912 return status;
2913 }
2914 status = be_cmd_enable_magic_wol(adapter,
2915 adapter->netdev->dev_addr, &cmd);
2916 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2917 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2918 } else {
2919 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2920 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2921 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2922 }
2923
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002924 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002925 return status;
2926}
2927
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac filter; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the previous MAC + 1 in the last octet */
		mac[5] += 1;
	}
	return status;
}
2962
Sathya Perla4c876612013-02-03 20:30:11 +00002963static int be_vfs_mac_query(struct be_adapter *adapter)
2964{
2965 int status, vf;
2966 u8 mac[ETH_ALEN];
2967 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002968
2969 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302970 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2971 mac, vf_cfg->if_handle,
2972 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002973 if (status)
2974 return status;
2975 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2976 }
2977 return 0;
2978}
2979
/* Undo be_vf_setup(): disable SR-IOV, delete each VF's MAC filter and
 * interface, and free the per-VF config array.
 * If the VFs are currently assigned to VMs, SR-IOV is left enabled and
 * only the driver-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC teardown mirrors the BEx/non-BEx split in setup */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
3007
/* Destroy all FW queue objects created during setup: MCC queues, RX
 * completion queues, TX queues and event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3015
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303016static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003017{
Sathya Perla191eb752012-02-23 18:50:13 +00003018 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3019 cancel_delayed_work_sync(&adapter->work);
3020 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3021 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303022}
3023
Somnath Koturb05004a2013-12-05 12:08:16 +05303024static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303025{
3026 int i;
3027
Somnath Koturb05004a2013-12-05 12:08:16 +05303028 if (adapter->pmac_id) {
3029 for (i = 0; i < (adapter->uc_macs + 1); i++)
3030 be_cmd_pmac_del(adapter, adapter->if_handle,
3031 adapter->pmac_id[i], 0);
3032 adapter->uc_macs = 0;
3033
3034 kfree(adapter->pmac_id);
3035 adapter->pmac_id = NULL;
3036 }
3037}
3038
#ifdef CONFIG_BE2NET_VXLAN
/* Revert the interface from tunnel-offload mode to normal mode and clear
 * the VxLAN UDP port programmed in the FW, then reset the driver state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303053
/* Full teardown of the function's resources (inverse of be_setup()):
 * stop the worker, clear VFs, disable VxLAN offloads, delete MAC filters,
 * destroy the interface and all queues, and disable MSI-x.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3075
/* Create a FW interface for each VF. On chips other than BE3, the
 * capability flags come from the VF's FW profile when one exists;
 * otherwise a default untagged/broadcast/multicast set is used.
 * Returns the first FW error encountered, else 0.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3105
Sathya Perla39f1d942012-05-08 19:41:24 +00003106static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003107{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003108 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003109 int vf;
3110
Sathya Perla39f1d942012-05-08 19:41:24 +00003111 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3112 GFP_KERNEL);
3113 if (!adapter->vf_cfg)
3114 return -ENOMEM;
3115
Sathya Perla11ac75e2011-12-13 00:58:50 +00003116 for_all_vfs(adapter, vf_cfg, vf) {
3117 vf_cfg->if_handle = -1;
3118 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003119 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003120 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003121}
3122
/* Configure SR-IOV virtual functions.
 * If VFs already exist (e.g. after a PF reload while VFs are assigned),
 * the existing if-handles and MACs are queried instead of re-created.
 * Otherwise: create VF interfaces, program MACs, grant privileges, set
 * QoS/link state, and finally enable SR-IOV on the PCI device.
 * On any failure the partial setup is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	/* Non-zero when VFs survived a previous PF unload */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Reuse existing VF interfaces, or create fresh ones */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise, query existing VF MACs or assign new ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_config_qos(adapter, 1000, vf + 1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	/* Enable SR-IOV last, after all VF state is programmed */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3218
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303219/* Converting function_mode bits on BE3 to SH mc_type enums */
3220
3221static u8 be_convert_mc_type(u32 function_mode)
3222{
3223 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3224 return vNIC1;
3225 else if (function_mode & FLEX10_MODE)
3226 return FLEX10;
3227 else if (function_mode & VNIC_MODE)
3228 return vNIC2;
3229 else if (function_mode & UMC_ENABLED)
3230 return UMC;
3231 else
3232 return MC_NONE;
3233}
3234
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs = 0;

	/* res is pre-zeroed by the caller; fields not set below stay 0 */
	if (be_physfn(adapter) && BE3_chip(adapter)) {
		be_cmd_get_profile_config(adapter, res, 0);
		/* Some old versions of BE3 FW don't report max_vfs value */
		if (res->max_vfs == 0) {
			max_vfs = pci_sriov_get_totalvfs(pdev);
			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		}
		use_sriov = res->max_vfs && sriov_want(adapter);
	}

	/* VFs get a smaller unicast-MAC quota than the PF */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* max_rss_qs stays 0 (no RSS) unless this PF supports RSS and
	 * SR-IOV is not in use
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;	/* +1 default (non-RSS) RXQ */

	/* Fewer EQs per function when the VF pool is carved out */
	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3304
Sathya Perla30128032011-11-10 19:17:57 +00003305static void be_setup_init(struct be_adapter *adapter)
3306{
3307 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003308 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003309 adapter->if_handle = -1;
3310 adapter->be3_native = false;
3311 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003312 if (be_physfn(adapter))
3313 adapter->cmd_privileges = MAX_PRIVILEGES;
3314 else
3315 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003316}
3317
/* Populate adapter->res with this function's resource limits.
 * On BE2/BE3 the limits are derived locally (BEx_get_resources());
 * on Lancer/Skyhawk they are queried from FW.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the PF queries the profile for the VF-pool size */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3361
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	/* Query port count, function mode/caps and ASIC revision from FW */
	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* Informational only; a failure here is not treated as fatal */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC; presumably freed on
	 * teardown -- TODO confirm the owner of this allocation
	 */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3396
Sathya Perla95046b92013-07-23 15:25:02 +05303397static int be_mac_setup(struct be_adapter *adapter)
3398{
3399 u8 mac[ETH_ALEN];
3400 int status;
3401
3402 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3403 status = be_cmd_get_perm_mac(adapter, mac);
3404 if (status)
3405 return status;
3406
3407 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3408 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3409 } else {
3410 /* Maybe the HW was reset; dev_addr must be re-programmed */
3411 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3412 }
3413
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003414 /* For BE3-R VFs, the PF programs the initial MAC address */
3415 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3416 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3417 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303418 return 0;
3419}
3420
/* Arm the adapter's delayed worker to run in 1000ms and record (via
 * BE_FLAGS_WORKER_SCHEDULED) that it is pending so it can be cancelled
 * later.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3426
Sathya Perla77071332013-08-27 16:57:34 +05303427static int be_setup_queues(struct be_adapter *adapter)
3428{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303429 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303430 int status;
3431
3432 status = be_evt_queues_create(adapter);
3433 if (status)
3434 goto err;
3435
3436 status = be_tx_qs_create(adapter);
3437 if (status)
3438 goto err;
3439
3440 status = be_rx_cqs_create(adapter);
3441 if (status)
3442 goto err;
3443
3444 status = be_mcc_queues_create(adapter);
3445 if (status)
3446 goto err;
3447
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303448 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3449 if (status)
3450 goto err;
3451
3452 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3453 if (status)
3454 goto err;
3455
Sathya Perla77071332013-08-27 16:57:34 +05303456 return 0;
3457err:
3458 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3459 return status;
3460}
3461
/* Tear down and re-create all queues (e.g. after a queue-count change):
 * stop traffic and the worker, destroy the queues, re-enable MSI-x where
 * possible, rebuild the queues and restart traffic.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3497
/* Bring up the adapter: query config, enable MSI-x, create the interface
 * and queues, program the MAC, apply vlan/rx-mode/flow-control settings,
 * optionally set up SR-IOV VFs, then start the periodic worker.
 * On any fatal error everything done so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the interface capabilities this function supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Old BE2 FW (< 4.0) has known interrupt problems; warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program any vlan filters that existed before this (re)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Sync HW flow-control state with the driver's desired state */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3583
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll handler (netconsole etc.): re-arm every event queue and schedule
 * its NAPI context so pending completions are processed even when normal
 * interrupt delivery is unavailable.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* Dropped the redundant trailing "return;" of a void function */
}
#endif
3599
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "

/* Cookie marking a flash_section_info table inside a UFI image; it is
 * only ever compared via memcmp(), so it is declared const.
 */
static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003602
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003603static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003604 const u8 *p, u32 img_start, int image_size,
3605 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003606{
3607 u32 crc_offset;
3608 u8 flashed_crc[4];
3609 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003610
3611 crc_offset = hdr_size + img_start + image_size - 4;
3612
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003613 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003614
3615 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003616 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003617 if (status) {
3618 dev_err(&adapter->pdev->dev,
3619 "could not get crc from flash, not flashing redboot\n");
3620 return false;
3621 }
3622
3623 /*update redboot only if crc does not match*/
3624 if (!memcmp(flashed_crc, p, 4))
3625 return false;
3626 else
3627 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003628}
3629
Sathya Perla306f1342011-08-02 19:57:45 +00003630static bool phy_flashing_required(struct be_adapter *adapter)
3631{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003632 return (adapter->phy.phy_type == TN_8022 &&
3633 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003634}
3635
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003636static bool is_comp_in_ufi(struct be_adapter *adapter,
3637 struct flash_section_info *fsec, int type)
3638{
3639 int i = 0, img_type = 0;
3640 struct flash_section_info_g2 *fsec_g2 = NULL;
3641
Sathya Perlaca34fe32012-11-06 17:48:56 +00003642 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003643 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3644
3645 for (i = 0; i < MAX_FLASH_COMP; i++) {
3646 if (fsec_g2)
3647 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3648 else
3649 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3650
3651 if (img_type == type)
3652 return true;
3653 }
3654 return false;
3655
3656}
3657
Jingoo Han4188e7d2013-08-05 18:02:02 +09003658static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003659 int header_size,
3660 const struct firmware *fw)
3661{
3662 struct flash_section_info *fsec = NULL;
3663 const u8 *p = fw->data;
3664
3665 p += header_size;
3666 while (p < (fw->data + fw->size)) {
3667 fsec = (struct flash_section_info *)p;
3668 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3669 return fsec;
3670 p += 32;
3671 }
3672 return NULL;
3673}
3674
/* Write one image of img_size bytes to flash in 32KB chunks through the
 * pre-allocated DMA buffer in flash_cmd.  All but the last chunk are sent
 * with a SAVE op; the final chunk uses a FLASH (commit) op.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits the image; earlier chunks only save */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			/* A PHY-FW rejection is treated as non-fatal --
			 * presumably FW refuses it on some configs; the
			 * image is simply skipped
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3715
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Flash layout descriptors for gen3 (BE3) UFIs: component offset,
	 * flash op-type, maximum size and UFI image type.
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Same layout table for gen2 (BE2) UFIs */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components not present in this UFI */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NC-SI FW is flashed only with FW >= 3.102.148.0 --
		 * presumably an FW-support threshold; TODO confirm
		 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			/* Flash boot code only when its CRC differs from
			 * what is already in flash
			 */
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject images that would run past the end of the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3825
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003826static int be_flash_skyhawk(struct be_adapter *adapter,
3827 const struct firmware *fw,
3828 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003829{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003830 int status = 0, i, filehdr_size = 0;
3831 int img_offset, img_size, img_optype, redboot;
3832 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3833 const u8 *p = fw->data;
3834 struct flash_section_info *fsec = NULL;
3835
3836 filehdr_size = sizeof(struct flash_file_hdr_g3);
3837 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3838 if (!fsec) {
3839 dev_err(&adapter->pdev->dev,
3840 "Invalid Cookie. UFI corrupted ?\n");
3841 return -1;
3842 }
3843
3844 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3845 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3846 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3847
3848 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3849 case IMAGE_FIRMWARE_iSCSI:
3850 img_optype = OPTYPE_ISCSI_ACTIVE;
3851 break;
3852 case IMAGE_BOOT_CODE:
3853 img_optype = OPTYPE_REDBOOT;
3854 break;
3855 case IMAGE_OPTION_ROM_ISCSI:
3856 img_optype = OPTYPE_BIOS;
3857 break;
3858 case IMAGE_OPTION_ROM_PXE:
3859 img_optype = OPTYPE_PXE_BIOS;
3860 break;
3861 case IMAGE_OPTION_ROM_FCoE:
3862 img_optype = OPTYPE_FCOE_BIOS;
3863 break;
3864 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3865 img_optype = OPTYPE_ISCSI_BACKUP;
3866 break;
3867 case IMAGE_NCSI:
3868 img_optype = OPTYPE_NCSI_FW;
3869 break;
3870 default:
3871 continue;
3872 }
3873
3874 if (img_optype == OPTYPE_REDBOOT) {
3875 redboot = be_flash_redboot(adapter, fw->data,
3876 img_offset, img_size,
3877 filehdr_size + img_hdrs_size);
3878 if (!redboot)
3879 continue;
3880 }
3881
3882 p = fw->data;
3883 p += filehdr_size + img_offset + img_hdrs_size;
3884 if (p + img_size > fw->data + fw->size)
3885 return -1;
3886
3887 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3888 if (status) {
3889 dev_err(&adapter->pdev->dev,
3890 "Flashing section type %d failed.\n",
3891 fsec->fsec_entry[i].type);
3892 return status;
3893 }
3894 }
3895 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003896}
3897
/* Download a FW image to a Lancer adapter: stream the image to the "/prg"
 * object in 32KB chunks, commit with a zero-length write, then reset the
 * adapter if the new FW requires it.  Returns 0 on success or a negative
 * errno / FW status code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The image must be a whole number of 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the write_object request followed by one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image down in LANCER_FW_DOWNLOAD_CHUNK sized pieces;
	 * advance by the number of bytes FW reports as actually written.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written (zero-length write at final offset) */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* New FW may need an adapter reset -- or a full host reboot --
	 * before it becomes active.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3995
Sathya Perlaca34fe32012-11-06 17:48:56 +00003996#define UFI_TYPE2 2
3997#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003998#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003999#define UFI_TYPE4 4
4000static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004001 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004002{
4003 if (fhdr == NULL)
4004 goto be_get_ufi_exit;
4005
Sathya Perlaca34fe32012-11-06 17:48:56 +00004006 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4007 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004008 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4009 if (fhdr->asic_type_rev == 0x10)
4010 return UFI_TYPE3R;
4011 else
4012 return UFI_TYPE3;
4013 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004014 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004015
4016be_get_ufi_exit:
4017 dev_err(&adapter->pdev->dev,
4018 "UFI and Interface are not compatible for flashing\n");
4019 return -1;
4020}
4021
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004022static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4023{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004024 struct flash_file_hdr_g3 *fhdr3;
4025 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004026 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004027 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004028 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004029
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004030 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004031 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4032 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00004033 if (!flash_cmd.va) {
4034 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004035 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004036 }
4037
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004038 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004039 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004040
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004041 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004042
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004043 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4044 for (i = 0; i < num_imgs; i++) {
4045 img_hdr_ptr = (struct image_hdr *)(fw->data +
4046 (sizeof(struct flash_file_hdr_g3) +
4047 i * sizeof(struct image_hdr)));
4048 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004049 switch (ufi_type) {
4050 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004051 status = be_flash_skyhawk(adapter, fw,
4052 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004053 break;
4054 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00004055 status = be_flash_BEx(adapter, fw, &flash_cmd,
4056 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004057 break;
4058 case UFI_TYPE3:
4059 /* Do not flash this ufi on BE3-R cards */
4060 if (adapter->asic_rev < 0x10)
4061 status = be_flash_BEx(adapter, fw,
4062 &flash_cmd,
4063 num_imgs);
4064 else {
4065 status = -1;
4066 dev_err(&adapter->pdev->dev,
4067 "Can't load BE3 UFI on BE3R\n");
4068 }
4069 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004070 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004071 }
4072
Sathya Perlaca34fe32012-11-06 17:48:56 +00004073 if (ufi_type == UFI_TYPE2)
4074 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004075 else if (ufi_type == -1)
4076 status = -1;
4077
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004078 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4079 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004080 if (status) {
4081 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004082 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004083 }
4084
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004085 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004086
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004087be_fw_exit:
4088 return status;
4089}
4090
4091int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4092{
4093 const struct firmware *fw;
4094 int status;
4095
4096 if (!netif_running(adapter->netdev)) {
4097 dev_err(&adapter->pdev->dev,
4098 "Firmware load not allowed (interface is down)\n");
4099 return -1;
4100 }
4101
4102 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4103 if (status)
4104 goto fw_exit;
4105
4106 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4107
4108 if (lancer_chip(adapter))
4109 status = lancer_fw_download(adapter, fw);
4110 else
4111 status = be_fw_download(adapter, fw);
4112
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004113 if (!status)
4114 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4115 adapter->fw_on_flash);
4116
Ajit Khaparde84517482009-09-04 03:12:16 +00004117fw_exit:
4118 release_firmware(fw);
4119 return status;
4120}
4121
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004122static int be_ndo_bridge_setlink(struct net_device *dev,
4123 struct nlmsghdr *nlh)
4124{
4125 struct be_adapter *adapter = netdev_priv(dev);
4126 struct nlattr *attr, *br_spec;
4127 int rem;
4128 int status = 0;
4129 u16 mode = 0;
4130
4131 if (!sriov_enabled(adapter))
4132 return -EOPNOTSUPP;
4133
4134 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4135
4136 nla_for_each_nested(attr, br_spec, rem) {
4137 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4138 continue;
4139
4140 mode = nla_get_u16(attr);
4141 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4142 return -EINVAL;
4143
4144 status = be_cmd_set_hsw_config(adapter, 0, 0,
4145 adapter->if_handle,
4146 mode == BRIDGE_MODE_VEPA ?
4147 PORT_FWD_TYPE_VEPA :
4148 PORT_FWD_TYPE_VEB);
4149 if (status)
4150 goto err;
4151
4152 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4153 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4154
4155 return status;
4156 }
4157err:
4158 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4159 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4160
4161 return status;
4162}
4163
/* ndo_bridge_getlink handler: report the embedded port's forwarding mode
 * (VEB or VEPA) to userspace.  Returns 0 (nothing reported) when SR-IOV
 * is disabled or when the hardware-switch query fails; otherwise fills
 * the netlink reply via ndo_dflt_bridge_getlink().
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev,
				 u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: query the current mode from firmware */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
4189
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304190#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304191static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4192 __be16 port)
4193{
4194 struct be_adapter *adapter = netdev_priv(netdev);
4195 struct device *dev = &adapter->pdev->dev;
4196 int status;
4197
4198 if (lancer_chip(adapter) || BEx_chip(adapter))
4199 return;
4200
4201 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4202 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4203 be16_to_cpu(port));
4204 dev_info(dev,
4205 "Only one UDP port supported for VxLAN offloads\n");
4206 return;
4207 }
4208
4209 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4210 OP_CONVERT_NORMAL_TO_TUNNEL);
4211 if (status) {
4212 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4213 goto err;
4214 }
4215
4216 status = be_cmd_set_vxlan_port(adapter, port);
4217 if (status) {
4218 dev_warn(dev, "Failed to add VxLAN port\n");
4219 goto err;
4220 }
4221 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4222 adapter->vxlan_port = port;
4223
4224 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4225 be16_to_cpu(port));
4226 return;
4227err:
4228 be_disable_vxlan_offloads(adapter);
4229 return;
4230}
4231
4232static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4233 __be16 port)
4234{
4235 struct be_adapter *adapter = netdev_priv(netdev);
4236
4237 if (lancer_chip(adapter) || BEx_chip(adapter))
4238 return;
4239
4240 if (adapter->vxlan_port != port)
4241 return;
4242
4243 be_disable_vxlan_offloads(adapter);
4244
4245 dev_info(&adapter->pdev->dev,
4246 "Disabled VxLAN offloads for UDP port %d\n",
4247 be16_to_cpu(port));
4248}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304249#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304250
/* net_device_ops table wiring the stack's netdev callbacks to the
 * be2net implementations; assigned in be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};
4280
/* Initialize netdev feature flags, ops table and ethtool ops.
 * Called once from be_probe() before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk supports checksum/TSO offload for encapsulated (VxLAN)
	 * traffic as well */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX stripping/filtering are always on and not toggleable,
	 * hence in features but not hw_features */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO size so frame + Ethernet header fits in 64K */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4313
4314static void be_unmap_pci_bars(struct be_adapter *adapter)
4315{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004316 if (adapter->csr)
4317 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004318 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004319 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004320}
4321
/* Return the PCI BAR index holding the doorbell region: BAR 0 on Lancer
 * and on virtual functions, BAR 4 on physical functions otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4329
4330static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004331{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004332 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004333 adapter->roce_db.size = 4096;
4334 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4335 db_bar(adapter));
4336 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4337 db_bar(adapter));
4338 }
Parav Pandit045508a2012-03-26 14:27:13 +00004339 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004340}
4341
4342static int be_map_pci_bars(struct be_adapter *adapter)
4343{
4344 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004345
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004346 if (BEx_chip(adapter) && be_physfn(adapter)) {
4347 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4348 if (adapter->csr == NULL)
4349 return -ENOMEM;
4350 }
4351
Sathya Perlace66f782012-11-06 17:48:58 +00004352 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004353 if (addr == NULL)
4354 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004355 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004356
4357 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004358 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004359
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004360pci_map_err:
4361 be_unmap_pci_bars(adapter);
4362 return -ENOMEM;
4363}
4364
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004365static void be_ctrl_cleanup(struct be_adapter *adapter)
4366{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004367 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004368
4369 be_unmap_pci_bars(adapter);
4370
4371 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004372 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4373 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004374
Sathya Perla5b8821b2011-08-02 19:57:44 +00004375 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004376 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004377 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4378 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004379}
4380
/* One-time control-path initialization: read the SLI interface register,
 * map PCI BARs, allocate the mailbox and rx-filter DMA buffers, and set
 * up the locks/completion used for firmware commands.  Uses goto-based
 * unwinding so each acquired resource is released on later failure.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode chip family and PF/VF identity from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* The aligned view aliases the raw allocation; only the raw one
	 * is ever freed */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved state is restored on EEH / error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4439
4440static void be_stats_cleanup(struct be_adapter *adapter)
4441{
Sathya Perla3abcded2010-10-03 22:12:27 -07004442 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004443
4444 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004445 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4446 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004447}
4448
4449static int be_stats_init(struct be_adapter *adapter)
4450{
Sathya Perla3abcded2010-10-03 22:12:27 -07004451 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004452
Sathya Perlaca34fe32012-11-06 17:48:56 +00004453 if (lancer_chip(adapter))
4454 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4455 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004456 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004457 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004458 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004459 else
4460 /* ALL non-BE ASICs */
4461 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004462
Joe Perchesede23fa2013-08-26 22:45:23 -07004463 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4464 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004465 if (cmd->va == NULL)
4466 return -1;
4467 return 0;
4468}
4469
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe().  The sequencing matters: the recovery worker must be
 * cancelled and the netdev unregistered before rings/firmware state
 * are destroyed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the error-recovery worker before dismantling anything it
	 * might touch */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4500
Sathya Perla39f1d942012-05-08 19:41:24 +00004501static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004502{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304503 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004504
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004505 status = be_cmd_get_cntl_attributes(adapter);
4506 if (status)
4507 return status;
4508
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004509 /* Must be a power of 2 or else MODULO will BUG_ON */
4510 adapter->be_get_temp_freq = 64;
4511
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304512 if (BEx_chip(adapter)) {
4513 level = be_cmd_get_fw_log_level(adapter);
4514 adapter->msg_enable =
4515 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4516 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004517
Sathya Perla92bf14a2013-08-27 16:57:32 +05304518 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004519 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004520}
4521
/* Attempt to recover a Lancer adapter after a firmware error: wait for
 * the chip to report ready, tear down the current setup, clear the
 * recorded error state and rebuild (re-opening the netdev if it was
 * running).  Returns 0 on success; -EAGAIN means firmware is still
 * provisioning resources and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4558
/* Periodic (1s) worker that polls for hardware errors and, on Lancer,
 * drives the recovery sequence: detach the netdev, run
 * lancer_recover_func(), and re-attach on success.  Reschedules itself
 * unless recovery failed with a non-retryable status.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,  func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the device
		 * while we rebuild it */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4585
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, otherwise issues the stats request, polls die
 * temperature (PF only, every be_get_temp_freq ticks), replenishes
 * starved RX queues and updates EQ delay.  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only fire a new stats request once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4628
Sathya Perla257a3fe2013-06-14 15:54:51 +05304629/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004630static bool be_reset_required(struct be_adapter *adapter)
4631{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304632 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004633}
4634
Sathya Perlad3791422012-09-28 04:39:44 +00004635static char *mc_name(struct be_adapter *adapter)
4636{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304637 char *str = ""; /* default */
4638
4639 switch (adapter->mc_type) {
4640 case UMC:
4641 str = "UMC";
4642 break;
4643 case FLEX10:
4644 str = "FLEX10";
4645 break;
4646 case vNIC1:
4647 str = "vNIC-1";
4648 break;
4649 case nPAR:
4650 str = "nPAR";
4651 break;
4652 case UFP:
4653 str = "UFP";
4654 break;
4655 case vNIC2:
4656 str = "vNIC-2";
4657 break;
4658 default:
4659 str = "";
4660 }
4661
4662 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004663}
4664
/* Return "PF" or "VF" depending on the function type, for log messages. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4669
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004670static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004671{
4672 int status = 0;
4673 struct be_adapter *adapter;
4674 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004675 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004676
4677 status = pci_enable_device(pdev);
4678 if (status)
4679 goto do_none;
4680
4681 status = pci_request_regions(pdev, DRV_NAME);
4682 if (status)
4683 goto disable_dev;
4684 pci_set_master(pdev);
4685
Sathya Perla7f640062012-06-05 19:37:20 +00004686 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004687 if (netdev == NULL) {
4688 status = -ENOMEM;
4689 goto rel_reg;
4690 }
4691 adapter = netdev_priv(netdev);
4692 adapter->pdev = pdev;
4693 pci_set_drvdata(pdev, adapter);
4694 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004695 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004696
Russell King4c15c242013-06-26 23:49:11 +01004697 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004698 if (!status) {
4699 netdev->features |= NETIF_F_HIGHDMA;
4700 } else {
Russell King4c15c242013-06-26 23:49:11 +01004701 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004702 if (status) {
4703 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4704 goto free_netdev;
4705 }
4706 }
4707
Ajit Khapardeea58c182013-10-18 16:06:24 -05004708 if (be_physfn(adapter)) {
4709 status = pci_enable_pcie_error_reporting(pdev);
4710 if (!status)
4711 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4712 }
Sathya Perlad6b6d982012-09-05 01:56:48 +00004713
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004714 status = be_ctrl_init(adapter);
4715 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004716 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004717
Sathya Perla2243e2e2009-11-22 22:02:03 +00004718 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004719 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004720 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004721 if (status)
4722 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004723 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004724
Sathya Perla39f1d942012-05-08 19:41:24 +00004725 if (be_reset_required(adapter)) {
4726 status = be_cmd_reset_function(adapter);
4727 if (status)
4728 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004729
Kalesh AP2d177be2013-04-28 22:22:29 +00004730 /* Wait for interrupts to quiesce after an FLR */
4731 msleep(100);
4732 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004733
4734 /* Allow interrupts for other ULPs running on NIC function */
4735 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004736
Kalesh AP2d177be2013-04-28 22:22:29 +00004737 /* tell fw we're ready to fire cmds */
4738 status = be_cmd_fw_init(adapter);
4739 if (status)
4740 goto ctrl_clean;
4741
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004742 status = be_stats_init(adapter);
4743 if (status)
4744 goto ctrl_clean;
4745
Sathya Perla39f1d942012-05-08 19:41:24 +00004746 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004747 if (status)
4748 goto stats_clean;
4749
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004750 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004751 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004752 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004753
Sathya Perla5fb379e2009-06-18 00:02:59 +00004754 status = be_setup(adapter);
4755 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004756 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004757
Sathya Perla3abcded2010-10-03 22:12:27 -07004758 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004759 status = register_netdev(netdev);
4760 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004761 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004762
Parav Pandit045508a2012-03-26 14:27:13 +00004763 be_roce_dev_add(adapter);
4764
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004765 schedule_delayed_work(&adapter->func_recovery_work,
4766 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004767
4768 be_cmd_query_port_name(adapter, &port_name);
4769
Sathya Perlad3791422012-09-28 04:39:44 +00004770 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4771 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004772
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004773 return 0;
4774
Sathya Perla5fb379e2009-06-18 00:02:59 +00004775unsetup:
4776 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004777stats_clean:
4778 be_stats_cleanup(adapter);
4779ctrl_clean:
4780 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004781free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004782 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004783rel_reg:
4784 pci_release_regions(pdev);
4785disable_dev:
4786 pci_disable_device(pdev);
4787do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004788 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004789 return status;
4790}
4791
/* Legacy PM suspend handler: arm Wake-on-LAN if enabled, quiesce the
 * driver (interrupts, recovery worker, data path), free the rings via
 * be_clear() and put the PCI function into the requested low-power state.
 * Returns 0 (the teardown calls here do not report failure).
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Arm WoL in FW before the function loses power */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop the periodic recovery worker before tearing the device down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() expects RTNL held, as in the ndo_stop path */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4816
4817static int be_resume(struct pci_dev *pdev)
4818{
4819 int status = 0;
4820 struct be_adapter *adapter = pci_get_drvdata(pdev);
4821 struct net_device *netdev = adapter->netdev;
4822
4823 netif_device_detach(netdev);
4824
4825 status = pci_enable_device(pdev);
4826 if (status)
4827 return status;
4828
Yijing Wang1ca01512013-06-27 20:53:42 +08004829 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004830 pci_restore_state(pdev);
4831
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304832 status = be_fw_wait_ready(adapter);
4833 if (status)
4834 return status;
4835
Ajit Khaparded4360d62013-11-22 12:51:09 -06004836 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00004837 /* tell fw we're ready to fire cmds */
4838 status = be_cmd_fw_init(adapter);
4839 if (status)
4840 return status;
4841
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004842 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004843 if (netif_running(netdev)) {
4844 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004845 be_open(netdev);
4846 rtnl_unlock();
4847 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004848
4849 schedule_delayed_work(&adapter->func_recovery_work,
4850 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004851 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004852
Suresh Reddy76a9e082014-01-15 13:23:40 +05304853 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004854 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004855
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004856 return 0;
4857}
4858
/*
 * Shutdown handler (reboot/poweroff path).
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop all deferred work before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset quiesces the device's DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4878
/* EEH/AER .error_detected callback: invoked by the PCI error-recovery
 * core when an I/O channel error is seen on this device.  Quiesces the
 * driver exactly once (guarded by adapter->eeh_error) and tells the core
 * whether a slot reset should be attempted.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Quiesce only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		/* Stop the recovery worker before tearing the device down */
		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4917
4918static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4919{
4920 struct be_adapter *adapter = pci_get_drvdata(pdev);
4921 int status;
4922
4923 dev_info(&adapter->pdev->dev, "EEH reset\n");
Sathya Perlacf588472010-02-14 21:22:01 +00004924
4925 status = pci_enable_device(pdev);
4926 if (status)
4927 return PCI_ERS_RESULT_DISCONNECT;
4928
4929 pci_set_master(pdev);
Yijing Wang1ca01512013-06-27 20:53:42 +08004930 pci_set_power_state(pdev, PCI_D0);
Sathya Perlacf588472010-02-14 21:22:01 +00004931 pci_restore_state(pdev);
4932
4933 /* Check if card is ok and fw is ready */
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004934 dev_info(&adapter->pdev->dev,
4935 "Waiting for FW to be ready after EEH reset\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004936 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004937 if (status)
4938 return PCI_ERS_RESULT_DISCONNECT;
4939
Sathya Perlad6b6d982012-09-05 01:56:48 +00004940 pci_cleanup_aer_uncorrect_error_status(pdev);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004941 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004942 return PCI_ERS_RESULT_RECOVERED;
4943}
4944
/* EEH/AER .resume callback: after a successful slot reset, reset and
 * re-initialize the function's FW state, rebuild the driver via
 * be_setup(), and bring the interface back up if it was running.
 * Failures are logged; there is no further recovery from this point.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic function-recovery worker */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4981
/* Callbacks invoked by the PCI error-recovery (EEH/AER) core */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4987
/* PCI driver descriptor registered in be_init_module() */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4998
4999static int __init be_init_module(void)
5000{
Joe Perches8e95a202009-12-03 07:58:21 +00005001 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5002 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005003 printk(KERN_WARNING DRV_NAME
5004 " : Module param rx_frag_size must be 2048/4096/8192."
5005 " Using 2048\n");
5006 rx_frag_size = 2048;
5007 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005008
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005009 return pci_register_driver(&be_driver);
5010}
5011module_init(be_init_module);
5012
/* Module exit point: unregister the PCI driver from the core */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);