blob: 8dc1af47c59a42b1310c8803ab1d3ca4df973780 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Sathya Perla6b7c5b92009-03-11 23:32:03 -070042static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable names for each bit of the
 * unrecoverable-error status-low register, indexed by bit position.
 * (Trailing spaces in some entries are preserved as originally defined.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable names for each bit of the
 * unrecoverable-error status-high register, indexed by bit position.
 * Bits with no documented meaning are reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
/* Ring the event-queue doorbell for EQ @qid.
 * @arm:        re-arm the EQ so it can raise further interrupts
 * @clear_int:  clear the pending-interrupt bit
 * @num_popped: number of EQ entries consumed (credits returned to HW)
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the MMIO write while EEH error recovery is in progress */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Ring the completion-queue doorbell for CQ @qid.
 * @arm:        re-arm the CQ for further event generation
 * @num_popped: number of CQ entries consumed (credits returned to HW)
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the MMIO write while EEH error recovery is in progress */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* .ndo_set_mac_address handler: program a new MAC address via FW commands.
 * The sequence is: add the new PMAC, delete the old one, then query the FW
 * for the currently-active MAC to confirm the change actually took effect
 * (on a VF, the PF may veto or pre-provision the address).
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000345 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000377 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000378 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000379 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
/* Copy the v1 (BE3) HW stats layout into the generic driver-stats block.
 * The response is byte-swapped in place first (FW reports little-endian).
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 (Skyhawk) HW stats layout into the generic driver-stats
 * block.  The response is byte-swapped in place first (FW reports
 * little-endian).  v2 additionally carries RoCE counters, copied only
 * when the function supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer pport stats response into the generic driver-stats
 * block.  The response is byte-swapped in place first.  The "_lo" fields
 * are the low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	/* Lancer exposes one fifo-overflow counter; report it in both
	 * generic fields.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
/* .ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the driver's parsed HW error counters into @stats.
 * Per-queue counters are read under a u64_stats seqcount retry loop so a
 * 64-bit value is never observed half-updated on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Populate the per-packet header WRB: LSO/checksum/VLAN offload hints
 * for the HW, plus the total WRB count and payload byte length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* NOTE(review): lso6 is not set on Lancer — presumably a
		 * chip limitation; confirm against the Lancer spec.
		 */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunneled pkt: ask HW for inner-IP csum and look at
			 * the inner L4 protocol for the TCP/UDP csum bits
			 */
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* DMA-map the skb's head and page frags and post one WRB per mapped
 * piece onto @txq, preceded by a header WRB and optionally followed by
 * a dummy WRB (to keep the count even on chips that need it).
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure, in which case all mappings made so far are undone and the
 * queue head is restored to where it was on entry.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot now; it is filled in last, once the
	 * total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the dma_err path */

	if (skb->len > skb->data_len) {
		/* linear part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB just to pad the count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind to the first payload WRB and unmap everything
	 * mapped so far.  Only the first mapping (the skb head, if any)
	 * was dma_map_single; the rest were page mappings.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert the VLAN tag (and, in QnQ mode, the outer tag) into the packet
 * data itself, as a workaround for HW VLAN-insertion issues.  May set
 * *skip_hw_vlan to tell the FW/HW not to tag the packet again.
 * Returns the (possibly reallocated) skb, or NULL if insertion failed —
 * in that case the skb has already been freed by the helpers.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* get a private copy before modifying packet data */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkts in QnQ/pvid mode get the port vlan id */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now inside the frame; clear the metadata copy so
		 * it is not inserted a second time
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply BEx/Lancer TX-path errata workarounds: trim mis-padded short
 * IPv4 packets, skip or perform VLAN tagging in SW where HW tagging is
 * buggy, and drop packets the HW cannot safely transmit.
 * Returns the (possibly modified/reallocated) skb, or NULL if the
 * packet was consumed (dropped or insertion failed).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * So trim the frame back to the length the IP header claims.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;	/* already freed by the helper */
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* .ndo_start_xmit handler: apply HW workarounds, build the WRBs for the
 * skb on its TX queue, stop the subqueue if it is about to fill, and
 * ring the doorbell.  Always returns NETDEV_TX_OK — drops are accounted
 * in tx_drv_drops rather than reported as busy.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* slot of the header WRB / sent_skb entry */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* pkt consumed (dropped/freed) by the workaround code */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed; WRBs were rolled back by make_tx_wrbs */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* too many vids for the HW filter: fall back to vlan promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in vlan promisc: nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1148
Patrick McHardy80d5c362013-04-19 02:04:28 +00001149static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001150{
1151 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001152 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001154 /* Packets with VID 0 are always received by Lancer by default */
1155 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301156 return status;
1157
1158 if (adapter->vlan_tag[vid])
1159 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001160
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001161 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301162 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001163
Somnath Kotura6b74e02014-01-21 15:50:55 +05301164 status = be_vid_config(adapter);
1165 if (status) {
1166 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001167 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301168 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301169
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001170 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001171}
1172
Patrick McHardy80d5c362013-04-19 02:04:28 +00001173static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174{
1175 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001176 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001178 /* Packets with VID 0 are always received by Lancer by default */
1179 if (lancer_chip(adapter) && vid == 0)
1180 goto ret;
1181
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301183 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001184 if (!status)
1185 adapter->vlans_added--;
1186 else
1187 adapter->vlan_tag[vid] = 1;
1188ret:
1189 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190}
1191
/* Take the interface out of promiscuous mode: clear the driver-side
 * promisc flags, then program the HW RX filter to match.
 */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
1199
/* .ndo_set_rx_mode handler: program the HW RX filter (promisc,
 * allmulti, multicast and unicast address lists) to match the netdev's
 * current flags and address lists, falling back to promiscuous modes
 * when the HW filter capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the vlan filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* unicast list changed: rebuild the HW uc-mac table from scratch */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more uc addrs than HW slots: fall back to promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev,
			 "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev,
			 "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1261
/* .ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On BEx chips this is done by deleting and re-adding the pmac entry;
 * on newer chips a single FW set-mac command is used.
 * Returns -EPERM if SR-IOV is not enabled, -EINVAL for a bad mac/vf,
 * otherwise the FW command status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		/* VF domains are 1-based in the FW, hence vf + 1 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		/* cache the address only once the FW accepted it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1293
/* ndo_get_vf_config() handler: report VF @vf's cached MAC address,
 * vlan/qos tag, tx rate and link-state tracking mode via @vi.
 * Returns -EPERM without SR-IOV, -EINVAL for a bad VF index.
 */
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001294static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301295			    struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001296{
1297	struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001298	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001299
Sathya Perla11ac75e2011-12-13 00:58:50 +00001300	if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001301		return -EPERM;
1302
Sathya Perla11ac75e2011-12-13 00:58:50 +00001303	if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001304		return -EINVAL;
1305
1306	vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001307	vi->tx_rate = vf_cfg->tx_rate;
	/* vlan_tag packs both the VLAN id and the priority (qos) bits */
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001308	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1309	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001310	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301311	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001312
1313	return 0;
1314}
1315
/* ndo_set_vf_vlan() handler: configure transparent VLAN tagging for
 * VF @vf.  A non-zero vlan/qos pair is packed into one tag and written
 * through the hardware-switch config; vlan == 0 && qos == 0 resets
 * (disables) transparent tagging.  On success the tag is cached in the
 * VF's config entry.
 */
Sathya Perla748b5392014-05-09 13:29:13 +05301316static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001317{
1318	struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001319	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001320	int status = 0;
1321
Sathya Perla11ac75e2011-12-13 00:58:50 +00001322	if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001323		return -EPERM;
1324
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001325	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001326		return -EINVAL;
1327
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001328	if (vlan || qos) {
1329		vlan |= qos << VLAN_PRIO_SHIFT;
		/* skip the FW call when the tag is already programmed */
Somnath Koturc5022242014-03-03 14:24:20 +05301330		if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001331			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1332						       vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001333	} else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001334		/* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301335		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1336					       vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001337	}
1338
Somnath Koturc5022242014-03-03 14:24:20 +05301339	if (!status)
1340		vf_cfg->vlan_tag = vlan;
1341	else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001342		dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301343			 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001344	return status;
1345}
1346
/* ndo_set_vf_tx_rate() handler: cap VF @vf's TX rate.  @rate is in
 * Mbps and must lie in [100, 10000]; the firmware command is passed
 * rate / 10 (presumably units of 10 Mbps — confirm against FW spec).
 * The accepted rate is cached in the VF's config entry.
 */
Sathya Perla748b5392014-05-09 13:29:13 +05301347static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001348{
1349	struct be_adapter *adapter = netdev_priv(netdev);
1350	int status = 0;
1351
Sathya Perla11ac75e2011-12-13 00:58:50 +00001352	if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001353		return -EPERM;
1354
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001355	if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001356		return -EINVAL;
1357
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001358	if (rate < 100 || rate > 10000) {
1359		dev_err(&adapter->pdev->dev,
1360			"tx rate must be between 100 and 10000 Mbps\n");
1361		return -EINVAL;
1362	}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001363
Sathya Perlaa4018012014-03-27 10:46:18 +05301364	status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001365	if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001366		dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301367			"tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001368	else
1369		adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001370	return status;
1371}
/* ndo_set_vf_link_state() handler: program VF @vf's logical link state
 * through firmware and, on success, cache it in plink_tracking.
 */
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301372static int be_set_vf_link_state(struct net_device *netdev, int vf,
1373				int link_state)
1374{
1375	struct be_adapter *adapter = netdev_priv(netdev);
1376	int status;
1377
1378	if (!sriov_enabled(adapter))
1379		return -EPERM;
1380
1381	if (vf >= adapter->num_vfs)
1382		return -EINVAL;
1383
1384	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1385	if (!status)
1386		adapter->vf_cfg[vf].plink_tracking = link_state;
1387
1388	return status;
1389}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001390
/* Snapshot the rx/tx packet counts and the timestamp that serve as the
 * baseline for the next adaptive-interrupt-coalescing (AIC) calculation.
 */
Sathya Perla2632baf2013-10-01 16:00:00 +05301391static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1392			  ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393{
Sathya Perla2632baf2013-10-01 16:00:00 +05301394	aic->rx_pkts_prev = rx_pkts;
1395	aic->tx_reqs_prev = tx_pkts;
1396	aic->jiffies = now;
1397}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001398
/* Adaptive interrupt coalescing: recompute the event-queue delay for
 * every EQ from the packets-per-second rate observed since the last
 * run, and push all changed delays to firmware in a single
 * be_cmd_modify_eqd() call.
 */
Sathya Perla2632baf2013-10-01 16:00:00 +05301399static void be_eqd_update(struct be_adapter *adapter)
1400{
1401	struct be_set_eqd set_eqd[MAX_EVT_QS];
1402	int eqd, i, num = 0, start;
1403	struct be_aic_obj *aic;
1404	struct be_eq_obj *eqo;
1405	struct be_rx_obj *rxo;
1406	struct be_tx_obj *txo;
1407	u64 rx_pkts, tx_pkts;
1408	ulong now;
1409	u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001410
Sathya Perla2632baf2013-10-01 16:00:00 +05301411	for_all_evt_queues(adapter, eqo, i) {
1412		aic = &adapter->aic_obj[eqo->idx];
		/* AIC disabled: clear the baseline and fall back to the
		 * fixed eqd (et_eqd) value.
		 */
1413		if (!aic->enable) {
1414			if (aic->jiffies)
1415				aic->jiffies = 0;
1416			eqd = aic->et_eqd;
1417			goto modify_eqd;
1418		}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001419
		/* read rx/tx packet counts consistently via the u64 stats
		 * seqcount retry loops
		 */
Sathya Perla2632baf2013-10-01 16:00:00 +05301420		rxo = &adapter->rx_obj[eqo->idx];
1421		do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001422			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301423			rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001424		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001425
Sathya Perla2632baf2013-10-01 16:00:00 +05301426		txo = &adapter->tx_obj[eqo->idx];
1427		do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001428			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301429			tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001430		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001431
Sathya Perla4097f662009-03-24 16:40:13 -07001432
Sathya Perla2632baf2013-10-01 16:00:00 +05301433		/* Skip, if wrapped around or first calculation */
1434		now = jiffies;
1435		if (!aic->jiffies || time_before(now, aic->jiffies) ||
1436		    rx_pkts < aic->rx_pkts_prev ||
1437		    tx_pkts < aic->tx_reqs_prev) {
1438			be_aic_update(aic, rx_pkts, tx_pkts, now);
1439			continue;
1440		}
Sathya Perlaab1594e2011-07-25 19:10:15 +00001441
Sathya Perla2632baf2013-10-01 16:00:00 +05301442		delta = jiffies_to_msecs(now - aic->jiffies);
1443		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1444			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		/* scale pps into an eqd value, clamped to [min_eqd, max_eqd];
		 * very low rates (eqd < 8) disable coalescing entirely
		 */
1445		eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001446
Sathya Perla2632baf2013-10-01 16:00:00 +05301447		if (eqd < 8)
1448			eqd = 0;
1449		eqd = min_t(u32, eqd, aic->max_eqd);
1450		eqd = max_t(u32, eqd, aic->min_eqd);
1451
1452		be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001453modify_eqd:
		/* batch only the EQs whose delay actually changed */
Sathya Perla2632baf2013-10-01 16:00:00 +05301454		if (eqd != aic->prev_eqd) {
1455			set_eqd[num].delay_multiplier = (eqd * 65)/100;
1456			set_eqd[num].eq_id = eqo->q.id;
1457			aic->prev_eqd = eqd;
1458			num++;
1459		}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001460	}
Sathya Perla2632baf2013-10-01 16:00:00 +05301461
1462	if (num)
1463		be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001464}
1465
/* Fold one RX completion's packet/byte/multicast/error counts into the
 * per-ring stats, inside the u64 stats sync section so 64-bit counters
 * read consistently on 32-bit hosts.
 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001466static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301467			       struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001468{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001469	struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001470
Sathya Perlaab1594e2011-07-25 19:10:15 +00001471	u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001472	stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001473	stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001474	stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001475	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001476		stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001477	if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001478		stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001479	u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001480}
1481
/* Return true if the HW checksum verification for this completion can
 * be trusted (i.e. the skb may be marked CHECKSUM_UNNECESSARY).
 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001482static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001483{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001484	/* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301485	 * Also ignore ipcksm for ipv6 pkts
1486	 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001487	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301488		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001489}
1490
/* Pop the page_info entry at the RX queue tail.  The last frag of a big
 * page is DMA-unmapped (the whole mapping is released); earlier frags
 * are only synced for CPU access since the page is still mapped.  The
 * consumed RXQ entry is accounted (tail advanced, used count dropped).
 */
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301491static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001493	struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494	struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001495	struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301496	u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497
Sathya Perla3abcded2010-10-03 22:12:27 -07001498	rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001499	BUG_ON(!rx_page_info->page);
1500
Sathya Perlae50287b2014-03-04 12:14:38 +05301501	if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001502		dma_unmap_page(&adapter->pdev->dev,
1503			       dma_unmap_addr(rx_page_info, bus),
1504			       adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05301505		rx_page_info->last_frag = false;
1506	} else {
1507		dma_sync_single_for_cpu(&adapter->pdev->dev,
1508					dma_unmap_addr(rx_page_info, bus),
1509					rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001510	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301512	queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513	atomic_dec(&rxq->used);
1514	return rx_page_info;
1515}
1516
1517/* Throw away the data in the Rx completion: pop and release every frag
 * page that was posted for this packet (drop path).
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001518static void be_rx_compl_discard(struct be_rx_obj *rxo,
1519				struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521	struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001522	u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001524	for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301525		page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001526		put_page(page_info->page);
1527		memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528	}
1529}
1530
1531/*
1532 * skb_fill_rx_data forms a complete skb for an ether frame
1533 * indicated by rxcp: the first frag (or the whole tiny packet) is
1534 * copied into the skb linear area, remaining frags are attached as
1535 * page fragments, coalescing frags that share a physical page.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001535static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1536			     struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001537{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538	struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001539	u16 i, j;
1540	u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541	u8 *start;
1542
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301543	page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544	start = page_address(page_info->page) + page_info->page_offset;
1545	prefetch(start);
1546
1547	/* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001548	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550	skb->len = curr_frag_len;
1551	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001552		memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553		/* Complete packet has now been moved to data */
1554		put_page(page_info->page);
1555		skb->data_len = 0;
1556		skb->tail += curr_frag_len;
1557	} else {
		/* copy only the ethernet header; hang the rest of the first
		 * frag off the skb as a page fragment
		 */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001558		hdr_len = ETH_HLEN;
1559		memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001560		skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001561		skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001562		skb_shinfo(skb)->frags[0].page_offset =
1563					page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05301564		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1565				  curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566		skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001567		skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568		skb->tail += hdr_len;
1569	}
Ajit Khaparde205859a2010-02-09 01:34:21 +00001570	page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571
Sathya Perla2e588f82011-03-11 02:49:26 +00001572	if (rxcp->pkt_size <= rx_frag_size) {
1573		BUG_ON(rxcp->num_rcvd != 1);
1574		return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575	}
1576
1577	/* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001578	remaining = rxcp->pkt_size - curr_frag_len;
1579	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301580		page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001581		curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001583		/* Coalesce all frags from the same physical page in one slot */
1584		if (page_info->page_offset == 0) {
1585			/* Fresh page */
1586			j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001587			skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001588			skb_shinfo(skb)->frags[j].page_offset =
1589							page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001590			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001591			skb_shinfo(skb)->nr_frags++;
1592		} else {
1593			put_page(page_info->page);
1594		}
1595
Eric Dumazet9e903e02011-10-18 21:00:24 +00001596		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597		skb->len += curr_frag_len;
1598		skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001599		skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001600		remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001601		page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001602	}
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001603	BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001604}
1605
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001606/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted frag pages, set checksum /
 * rx-queue / rss-hash / vlan metadata and hand it to the stack.
 */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301607static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001608				struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001609{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001610	struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001611	struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612	struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001613
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001614	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001615	if (unlikely(!skb)) {
		/* no skb available: count the drop and free the frag pages */
Sathya Perlaac124ff2011-07-25 19:10:14 +00001616		rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001617		be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001618		return;
1619	}
1620
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001621	skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001622
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001623	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001624		skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001625	else
1626		skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001628	skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001629	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001630	if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001631		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301632
1633	skb->encapsulation = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301634	skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001635
Jiri Pirko343e43c2011-08-25 02:50:51 +00001636	if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001637		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001638
1639	netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001640}
1641
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001642/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach all frag pages to the napi GRO skb (coalescing same-page
 * frags), set metadata and feed the packet to napi_gro_frags().
 */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001643static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1644				    struct napi_struct *napi,
1645				    struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001646{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001647	struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001648	struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001649	struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00001650	u16 remaining, curr_frag_len;
1651	u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001652
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001653	skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001654	if (!skb) {
1655		be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001656		return;
1657	}
1658
Sathya Perla2e588f82011-03-11 02:49:26 +00001659	remaining = rxcp->pkt_size;
1660	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301661		page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001662
1663		curr_frag_len = min(remaining, rx_frag_size);
1664
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001665		/* Coalesce all frags from the same physical page in one slot */
1666		if (i == 0 || page_info->page_offset == 0) {
1667			/* First frag or Fresh page */
1668			j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001669			skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001670			skb_shinfo(skb)->frags[j].page_offset =
1671							page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001672			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001673		} else {
1674			put_page(page_info->page);
1675		}
Eric Dumazet9e903e02011-10-18 21:00:24 +00001676		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001677		skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678		remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679		memset(page_info, 0, sizeof(*page_info));
1680	}
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001681	BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001683	skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001684	skb->len = rxcp->pkt_size;
1685	skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001686	skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001687	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001688	if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001689		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301690
1691	skb->encapsulation = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301692	skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001693
Jiri Pirko343e43c2011-08-25 02:50:51 +00001694	if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001695		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001696
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001697	napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698}
1699
/* Decode a v1 RX completion descriptor (used when be3_native is set —
 * see be_rx_compl_get()) into the chip-independent be_rx_compl_info.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001700static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1701				 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001702{
Sathya Perla2e588f82011-03-11 02:49:26 +00001703	rxcp->pkt_size =
1704		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1705	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1706	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1707	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001708	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001709	rxcp->ip_csum =
1710		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1711	rxcp->l4_csum =
1712		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1713	rxcp->ipv6 =
1714		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001715	rxcp->num_rcvd =
1716		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1717	rxcp->pkt_type =
1718		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001719	rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001720		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
Sathya Perla15d72182011-03-21 20:49:26 +00001721	if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301722		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
David S. Miller3c709f82011-05-11 14:26:15 -04001723					  compl);
Sathya Perla748b5392014-05-09 13:29:13 +05301724		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1725					       vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001726	}
Sathya Perla12004ae2011-08-02 19:57:46 +00001727	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301728	rxcp->tunneled =
1729		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001730}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731
/* Decode a v0 RX completion descriptor (the non be3-native layout —
 * see be_rx_compl_get()) into the chip-independent be_rx_compl_info.
 * Unlike v1, v0 carries an ip_frag bit instead of a tunneled bit.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001732static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1733				 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001734{
1735	rxcp->pkt_size =
1736		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1737	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1738	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1739	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001740	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001741	rxcp->ip_csum =
1742		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1743	rxcp->l4_csum =
1744		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1745	rxcp->ipv6 =
1746		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001747	rxcp->num_rcvd =
1748		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1749	rxcp->pkt_type =
1750		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001751	rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001752		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
Sathya Perla15d72182011-03-21 20:49:26 +00001753	if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301754		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
David S. Miller3c709f82011-05-11 14:26:15 -04001755					  compl);
Sathya Perla748b5392014-05-09 13:29:13 +05301756		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1757					       vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001758	}
Sathya Perla12004ae2011-08-02 19:57:46 +00001759	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001760	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1761				      ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001762}
1763
/* Fetch the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp (v0 or v1 layout depending on be3_native), apply vlan
 * fix-ups, and consume the CQ entry.  Returns NULL when no new
 * completion is available.
 */
1764static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1765{
1766	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1767	struct be_rx_compl_info *rxcp = &rxo->rxcp;
1768	struct be_adapter *adapter = rxo->adapter;
1769
1770	/* For checking the valid bit it is Ok to use either definition as the
1771	 * valid bit is at the same position in both v0 and v1 Rx compl */
1772	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001773		return NULL;
1774
	/* read barrier: order the valid-bit read before the rest of the
	 * DMA'ed completion contents
	 */
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001775	rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001776	be_dws_le_to_cpu(compl, sizeof(*compl));
1777
1778	if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001779		be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001780	else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001781		be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001782
	/* HW L4 checksum is not valid for IP fragments */
Somnath Koture38b1702013-05-29 22:55:56 +00001783	if (rxcp->ip_frag)
1784		rxcp->l4_csum = 0;
1785
Sathya Perla15d72182011-03-21 20:49:26 +00001786	if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301787		/* In QNQ modes, if qnq bit is not set, then the packet was
1788		 * tagged only with the transparent outer vlan-tag and must
1789		 * not be treated as a vlan packet by host
1790		 */
1791		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
Sathya Perla15d72182011-03-21 20:49:26 +00001792			rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001793
Sathya Perla15d72182011-03-21 20:49:26 +00001794		if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001795			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001796
Somnath Kotur939cf302011-08-18 21:51:49 -07001797		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001798		    !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001799			rxcp->vlanf = 0;
1800	}
Sathya Perla2e588f82011-03-11 02:49:26 +00001801
1802	/* As the compl has been parsed, reset it; we wont touch it again */
1803	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001804
Sathya Perla3abcded2010-10-03 22:12:27 -07001805	queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806	return rxcp;
1807}
1808
/* Allocate a page (compound when @size spans multiple pages) big
 * enough for @size bytes, with the given gfp flags.
 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001809static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001810{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811	u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001812
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001813	if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001814		gfp |= __GFP_COMP;
1815	return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001816}
1817
1818/*
1819 * Allocate a page, split it to fragments of size rx_frag_size and post as
1820 * receive buffers to BE
1821 */
/* Refill the RX ring: carve each allocated "big page" into rx_frag_size
 * fragments and post them as receive buffers to the HW.
 * Posts at most MAX_RX_POST buffers and stops early when the page table
 * slot is still in use or page allocation/DMA mapping fails.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* A non-NULL page_info->page means the slot is still owned by a
	 * previously posted buffer; stop posting at that point.
	 */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page; map it once for the whole
			 * page and hand out per-fragment offsets below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Additional frag in the same page: take an extra
			 * page reference so each frag holds its own ref.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Page exhausted: this frag is the last one and
			 * records the page-level DMA address for unmapping.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1896
Sathya Perla5fb379e2009-06-18 00:02:59 +00001897static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001899 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1900
1901 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1902 return NULL;
1903
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001904 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1906
1907 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1908
1909 queue_tail_inc(tx_cq);
1910 return txcp;
1911}
1912
/* Free the skb whose last wrb sits at @last_index and unmap all of its
 * wrbs (header wrb + fragment wrbs) from the txq tail up to @last_index.
 * Returns the number of wrbs consumed so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is recorded at the tail slot (its header wrb position) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb header only once (first wrb after the hdr
		 * wrb), and only if the skb has linear data.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* Safe in any context (hard-irq included) */
	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1944
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001945/* Return the number of events in the event queue */
1946static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001947{
1948 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001949 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001950
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001951 do {
1952 eqe = queue_tail_node(&eqo->q);
1953 if (eqe->evt == 0)
1954 break;
1955
1956 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001957 eqe->evt = 0;
1958 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001959 queue_tail_inc(&eqo->q);
1960 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001961
1962 return num;
1963}
1964
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001965/* Leaves the EQ is disarmed state */
1966static void be_eq_clean(struct be_eq_obj *eqo)
1967{
1968 int num = events_get(eqo);
1969
1970 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1971}
1972
/* Drain the RX CQ (waiting for the HW flush completion where required)
 * and then release all RX buffers still posted to the RXQ.
 * Used on queue-teardown paths; not a fast-path routine.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is dead */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2021
/* Drain all TX completion queues, then forcibly free any posted TX wrbs
 * for which a completion will never arrive (e.g. after an HW error).
 * Used on teardown paths.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the last wrb index of this skb so
			 * be_tx_compl_process() can walk and unmap it.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2081
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002082static void be_evt_queues_destroy(struct be_adapter *adapter)
2083{
2084 struct be_eq_obj *eqo;
2085 int i;
2086
2087 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002088 if (eqo->q.created) {
2089 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002090 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302091 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302092 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002093 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002094 be_queue_free(adapter, &eqo->q);
2095 }
2096}
2097
2098static int be_evt_queues_create(struct be_adapter *adapter)
2099{
2100 struct be_queue_info *eq;
2101 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302102 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002103 int i, rc;
2104
Sathya Perla92bf14a2013-08-27 16:57:32 +05302105 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2106 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002107
2108 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302109 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2110 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302111 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302112 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002113 eqo->adapter = adapter;
2114 eqo->tx_budget = BE_TX_BUDGET;
2115 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302116 aic->max_eqd = BE_MAX_EQD;
2117 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002118
2119 eq = &eqo->q;
2120 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302121 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002122 if (rc)
2123 return rc;
2124
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302125 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002126 if (rc)
2127 return rc;
2128 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002129 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002130}
2131
Sathya Perla5fb379e2009-06-18 00:02:59 +00002132static void be_mcc_queues_destroy(struct be_adapter *adapter)
2133{
2134 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002135
Sathya Perla8788fdc2009-07-27 22:52:03 +00002136 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002137 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002138 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002139 be_queue_free(adapter, q);
2140
Sathya Perla8788fdc2009-07-27 22:52:03 +00002141 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002142 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002143 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002144 be_queue_free(adapter, q);
2145}
2146
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* First the CQ, since the MCC queue posts completions to it */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of construction */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2179
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180static void be_tx_queues_destroy(struct be_adapter *adapter)
2181{
2182 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002183 struct be_tx_obj *txo;
2184 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002185
Sathya Perla3c8def92011-06-12 20:01:58 +00002186 for_all_tx_queues(adapter, txo, i) {
2187 q = &txo->q;
2188 if (q->created)
2189 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2190 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002191
Sathya Perla3c8def92011-06-12 20:01:58 +00002192 q = &txo->cq;
2193 if (q->created)
2194 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2195 be_queue_free(adapter, q);
2196 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002197}
2198
/* Create all TX queues with their completion queues.
 * Returns 0 on success or a negative status on the first failure
 * (caller is expected to tear down via be_tx_queues_destroy()).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2239
2240static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241{
2242 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002243 struct be_rx_obj *rxo;
2244 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002245
Sathya Perla3abcded2010-10-03 22:12:27 -07002246 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002247 q = &rxo->cq;
2248 if (q->created)
2249 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2250 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002251 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252}
2253
/* Decide the number of RX rings and create a completion queue per ring.
 * The RX queues themselves are created later; only CQs are set up here.
 * Returns 0 on success or a negative status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* Big pages are carved into rx_frag_size buffers in be_post_rx_frags */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs are distributed round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2290
/* INTx interrupt handler: schedule NAPI and acknowledge pending events,
 * while tracking spurious interrupts so the kernel does not disable the IRQ.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack consumed events; EQ is left unarmed until be_poll finishes */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2322
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002323static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002324{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002325 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002326
Sathya Perla0b545a62012-11-23 00:27:18 +00002327 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2328 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002329 return IRQ_HANDLED;
2330}
2331
Sathya Perla2e588f82011-03-11 02:49:26 +00002332static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002333{
Somnath Koture38b1702013-05-29 22:55:56 +00002334 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002335}
2336
/* Consume up to @budget RX completions from @rxo's CQ and deliver the pkts
 * (via GRO or the regular receive path). @polling tells whether we were
 * invoked from NAPI or from busy-poll. Returns the number of compls done.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2392
/* Reap up to @budget TX completions for @txo (netdev subqueue @idx),
 * free the corresponding wrbs/skbs, and wake the subqueue if it was
 * stopped for lack of wrbs. Returns true when the CQ was fully drained.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002426
/* NAPI poll handler: services every TXQ and RXQ mapped to this EQ (plus
 * MCC compls on the EQ that owns the MCC queue). Re-arms the EQ only when
 * all work fit within @budget; otherwise NAPI keeps polling.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Rings are being busy-polled; claim full budget to re-poll */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2471
Sathya Perla6384a4d2013-10-25 10:40:16 +05302472#ifdef CONFIG_NET_RX_BUSY_POLL
2473static int be_busy_poll(struct napi_struct *napi)
2474{
2475 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2476 struct be_adapter *adapter = eqo->adapter;
2477 struct be_rx_obj *rxo;
2478 int i, work = 0;
2479
2480 if (!be_lock_busy_poll(eqo))
2481 return LL_FLUSH_BUSY;
2482
2483 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2484 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2485 if (work)
2486 break;
2487 }
2488
2489 be_unlock_busy_poll(eqo);
2490 return work;
2491}
2492#endif
2493
/* Poll the HW/FW error registers and latch any unrecoverable error state.
 * On Lancer chips the SLIPORT status/error registers are checked; on other
 * chips the PCI-config UE (Unrecoverable Error) status words are checked
 * against their mask registers. When a real error is found the carrier is
 * turned off; adapter->hw_error is set where the HW must be treated as dead.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* An error was already latched earlier; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Bits that are masked off are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Name every UE bit that remains set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2569
Sathya Perla8d56ff12009-11-22 22:02:26 +00002570static void be_msix_disable(struct be_adapter *adapter)
2571{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002572 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002573 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002574 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302575 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002576 }
2577}
2578
/* Request a range of MSI-x vectors and split them between NIC and RoCE.
 * Returns 0 on success. On failure: VFs (which cannot use INTx) propagate
 * the pci error so probe fails; PFs return 0 so probe can continue in
 * INTx mode.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May be granted fewer vectors than requested, but at least
	 * MIN_MSIX_VECTORS; a negative return means total failure.
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Hand half of the granted vectors to RoCE when possible */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2622
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002623static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302624 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002625{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302626 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002627}
2628
/* Request one IRQ per event queue (MSI-x mode).
 * On failure frees the IRQs acquired so far and disables MSI-x so the
 * caller can fall back to INTx. Returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* per-queue IRQ name, e.g. "eth0-q0" (/proc/interrupts) */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the vectors successfully requested above */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2652
/* Hook up interrupts: prefer MSI-x; PFs may fall back to a shared INTx
 * line attached to EQ0. Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2680
2681static void be_irq_unregister(struct be_adapter *adapter)
2682{
2683 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002684 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002685 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002686
2687 if (!adapter->isr_registered)
2688 return;
2689
2690 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002691 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002692 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002693 goto done;
2694 }
2695
2696 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002697 for_all_evt_queues(adapter, eqo, i)
2698 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002699
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002700done:
2701 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002702}
2703
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002704static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002705{
2706 struct be_queue_info *q;
2707 struct be_rx_obj *rxo;
2708 int i;
2709
2710 for_all_rx_queues(adapter, rxo, i) {
2711 q = &rxo->q;
2712 if (q->created) {
2713 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002714 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002715 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002716 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002717 }
2718}
2719
/* ndo_stop handler: quiesce the interface. Teardown order matters:
 * disable NAPI/busy-poll, stop async MCC, drain TX, destroy RX rings,
 * delete the uc-list MACs, drain/clean the EQs and finally release IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the uc-list MACs; pmac_id[0] (the primary MAC) is kept */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Ensure no IRQ handler is still running before cleaning the EQs */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2769
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002770static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002771{
2772 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002773 int rc, i, j;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302774 u8 rss_hkey[RSS_HASH_KEY_LEN];
2775 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00002776
2777 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002778 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2779 sizeof(struct be_eth_rx_d));
2780 if (rc)
2781 return rc;
2782 }
2783
2784 /* The FW would like the default RXQ to be created first */
2785 rxo = default_rxo(adapter);
2786 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2787 adapter->if_handle, false, &rxo->rss_id);
2788 if (rc)
2789 return rc;
2790
2791 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002792 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002793 rx_frag_size, adapter->if_handle,
2794 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002795 if (rc)
2796 return rc;
2797 }
2798
2799 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302800 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2801 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002802 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302803 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002804 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302805 rss->rsstable[j + i] = rxo->rss_id;
2806 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002807 }
2808 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05302809 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2810 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00002811
2812 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05302813 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2814 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302815 } else {
2816 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05302817 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302818 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002819
Venkata Duvvurue2557872014-04-21 15:38:00 +05302820 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05302821 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Venkata Duvvurue2557872014-04-21 15:38:00 +05302822 128, rss_hkey);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302823 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302824 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302825 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002826 }
2827
Venkata Duvvurue2557872014-04-21 15:38:00 +05302828 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2829
Sathya Perla482c9e72011-06-29 23:33:17 +00002830 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002831 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002832 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002833 return 0;
2834}
2835
/* ndo_open handler: create RX rings, register IRQs, arm all CQs/EQs,
 * enable NAPI and busy-poll, refresh link state and start the TX queues.
 * Returns 0, or -EIO after undoing partial setup via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the completion queues so they start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VxLAN ports so offloads get set up */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2885
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002886static int be_setup_wol(struct be_adapter *adapter, bool enable)
2887{
2888 struct be_dma_mem cmd;
2889 int status = 0;
2890 u8 mac[ETH_ALEN];
2891
2892 memset(mac, 0, ETH_ALEN);
2893
2894 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002895 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2896 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002897 if (cmd.va == NULL)
2898 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002899
2900 if (enable) {
2901 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302902 PCICFG_PM_CONTROL_OFFSET,
2903 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002904 if (status) {
2905 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002906 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002907 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2908 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002909 return status;
2910 }
2911 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302912 adapter->netdev->dev_addr,
2913 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002914 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2915 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2916 } else {
2917 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2918 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2919 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2920 }
2921
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002922 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002923 return status;
2924}
2925
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac filter; newer chips set the MAC
		 * directly on the VF interface
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed MAC + 1 in the last octet */
		mac[5] += 1;
	}
	return status;
}
2961
Sathya Perla4c876612013-02-03 20:30:11 +00002962static int be_vfs_mac_query(struct be_adapter *adapter)
2963{
2964 int status, vf;
2965 u8 mac[ETH_ALEN];
2966 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002967
2968 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302969 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2970 mac, vf_cfg->if_handle,
2971 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002972 if (status)
2973 return status;
2974 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2975 }
2976 return 0;
2977}
2978
/* Undo be_vf_setup(): disable SR-IOV and destroy per-VF MACs/interfaces.
 * If VFs are still assigned to VMs, SR-IOV is left enabled and only the
 * host-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx used a pmac filter; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
3006
/* Destroy all queues in the reverse order of their creation:
 * MCC queues first, then RX CQs, TX queues and finally the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3014
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303015static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003016{
Sathya Perla191eb752012-02-23 18:50:13 +00003017 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3018 cancel_delayed_work_sync(&adapter->work);
3019 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3020 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303021}
3022
Somnath Koturb05004a2013-12-05 12:08:16 +05303023static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303024{
3025 int i;
3026
Somnath Koturb05004a2013-12-05 12:08:16 +05303027 if (adapter->pmac_id) {
3028 for (i = 0; i < (adapter->uc_macs + 1); i++)
3029 be_cmd_pmac_del(adapter, adapter->if_handle,
3030 adapter->pmac_id[i], 0);
3031 adapter->uc_macs = 0;
3032
3033 kfree(adapter->pmac_id);
3034 adapter->pmac_id = NULL;
3035 }
3036}
3037
#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN offload state: convert the tunnel interface back to normal
 * mode, clear the FW's VxLAN UDP port and reset the driver-side flags.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303052
/* Full teardown of the adapter's logical state: stop the worker, clear
 * VFs, revert VxLAN offloads, delete MACs, destroy the FW interface and
 * all queues, and release MSI-x. Counterpart of be_setup().
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3074
/* Create one FW interface (if_handle) per VF, honoring any per-VF FW
 * resource profile for the capability flags. Returns 0 or the first
 * FW-cmd error; partially-created interfaces are cleaned up by the
 * caller via be_vf_clear().
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3106
Sathya Perla39f1d942012-05-08 19:41:24 +00003107static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003108{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003109 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003110 int vf;
3111
Sathya Perla39f1d942012-05-08 19:41:24 +00003112 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3113 GFP_KERNEL);
3114 if (!adapter->vf_cfg)
3115 return -ENOMEM;
3116
Sathya Perla11ac75e2011-12-13 00:58:50 +00003117 for_all_vfs(adapter, vf_cfg, vf) {
3118 vf_cfg->if_handle = -1;
3119 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003120 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003121 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003122}
3123
/* Enable and configure SR-IOV VFs: create per-VF interfaces, assign (or
 * query existing) MACs, grant filtering privileges, set QoS/link state,
 * and finally enable SR-IOV on the PCI device. If VFs were already
 * enabled (e.g. after a PF reset), the existing VF count and MACs are
 * reused. Returns 0, or an error after cleaning up via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		/* clamp the module-parameter request to what the HW allows */
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: just look up their if_handles */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_config_qos(adapter, 1000, vf + 1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		/* Enable SR-IOV last so guests only see fully-configured VFs */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3219
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303220/* Converting function_mode bits on BE3 to SH mc_type enums */
3221
3222static u8 be_convert_mc_type(u32 function_mode)
3223{
3224 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3225 return vNIC1;
3226 else if (function_mode & FLEX10_MODE)
3227 return FLEX10;
3228 else if (function_mode & VNIC_MODE)
3229 return vNIC2;
3230 else if (function_mode & UMC_ENABLED)
3231 return UMC;
3232 else
3233 return MC_NONE;
3234}
3235
Sathya Perla92bf14a2013-08-27 16:57:32 +05303236/* On BE2/BE3 FW does not suggest the supported limits */
3237static void BEx_get_resources(struct be_adapter *adapter,
3238 struct be_resources *res)
3239{
3240 struct pci_dev *pdev = adapter->pdev;
3241 bool use_sriov = false;
Suresh Reddyecf1f6e2014-03-11 18:53:03 +05303242 int max_vfs = 0;
Suresh Reddye3dc8672014-01-06 13:02:25 +05303243
Suresh Reddyecf1f6e2014-03-11 18:53:03 +05303244 if (be_physfn(adapter) && BE3_chip(adapter)) {
3245 be_cmd_get_profile_config(adapter, res, 0);
3246 /* Some old versions of BE3 FW don't report max_vfs value */
3247 if (res->max_vfs == 0) {
3248 max_vfs = pci_sriov_get_totalvfs(pdev);
3249 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3250 }
3251 use_sriov = res->max_vfs && sriov_want(adapter);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303252 }
3253
3254 if (be_physfn(adapter))
3255 res->max_uc_mac = BE_UC_PMAC_COUNT;
3256 else
3257 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3258
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303259 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3260
3261 if (be_is_mc(adapter)) {
3262 /* Assuming that there are 4 channels per port,
3263 * when multi-channel is enabled
3264 */
3265 if (be_is_qnq_mode(adapter))
3266 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3267 else
3268 /* In a non-qnq multichannel mode, the pvid
3269 * takes up one vlan entry
3270 */
3271 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3272 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303273 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303274 }
3275
Sathya Perla92bf14a2013-08-27 16:57:32 +05303276 res->max_mcast_mac = BE_MAX_MC;
3277
Vasundhara Volama5243da2014-03-11 18:53:07 +05303278 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3279 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3280 * *only* if it is RSS-capable.
3281 */
3282 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3283 !be_physfn(adapter) || (be_is_mc(adapter) &&
3284 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
Sathya Perla92bf14a2013-08-27 16:57:32 +05303285 res->max_tx_qs = 1;
3286 else
3287 res->max_tx_qs = BE3_MAX_TX_QS;
3288
3289 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3290 !use_sriov && be_physfn(adapter))
3291 res->max_rss_qs = (adapter->be3_native) ?
3292 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3293 res->max_rx_qs = res->max_rss_qs + 1;
3294
Suresh Reddye3dc8672014-01-06 13:02:25 +05303295 if (be_physfn(adapter))
Suresh Reddyecf1f6e2014-03-11 18:53:03 +05303296 res->max_evt_qs = (res->max_vfs > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05303297 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3298 else
3299 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303300
3301 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3302 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3303 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3304}
3305
Sathya Perla30128032011-11-10 19:17:57 +00003306static void be_setup_init(struct be_adapter *adapter)
3307{
3308 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003309 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003310 adapter->if_handle = -1;
3311 adapter->be3_native = false;
3312 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003313 if (be_physfn(adapter))
3314 adapter->cmd_privileges = MAX_PRIVILEGES;
3315 else
3316 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003317}
3318
Sathya Perla92bf14a2013-08-27 16:57:32 +05303319static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003320{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303321 struct device *dev = &adapter->pdev->dev;
3322 struct be_resources res = {0};
3323 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003324
Sathya Perla92bf14a2013-08-27 16:57:32 +05303325 if (BEx_chip(adapter)) {
3326 BEx_get_resources(adapter, &res);
3327 adapter->res = res;
3328 }
3329
Sathya Perla92bf14a2013-08-27 16:57:32 +05303330 /* For Lancer, SH etc read per-function resource limits from FW.
3331 * GET_FUNC_CONFIG returns per function guaranteed limits.
3332 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3333 */
Sathya Perla4c876612013-02-03 20:30:11 +00003334 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303335 status = be_cmd_get_func_config(adapter, &res);
3336 if (status)
3337 return status;
3338
3339 /* If RoCE may be enabled stash away half the EQs for RoCE */
3340 if (be_roce_supported(adapter))
3341 res.max_evt_qs /= 2;
3342 adapter->res = res;
3343
3344 if (be_physfn(adapter)) {
3345 status = be_cmd_get_profile_config(adapter, &res, 0);
3346 if (status)
3347 return status;
3348 adapter->res.max_vfs = res.max_vfs;
3349 }
3350
3351 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3352 be_max_txqs(adapter), be_max_rxqs(adapter),
3353 be_max_rss(adapter), be_max_eqs(adapter),
3354 be_max_vfs(adapter));
3355 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3356 be_max_uc(adapter), be_max_mc(adapter),
3357 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003358 }
3359
Sathya Perla92bf14a2013-08-27 16:57:32 +05303360 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003361}
3362
Sathya Perla39f1d942012-05-08 19:41:24 +00003363/* Routine to query per function resource limits */
3364static int be_get_config(struct be_adapter *adapter)
3365{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303366 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003367 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003368
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003369 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3370 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003371 &adapter->function_caps,
3372 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003373 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303374 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003375
Vasundhara Volam542963b2014-01-15 13:23:33 +05303376 if (be_physfn(adapter)) {
3377 status = be_cmd_get_active_profile(adapter, &profile_id);
3378 if (!status)
3379 dev_info(&adapter->pdev->dev,
3380 "Using profile 0x%x\n", profile_id);
3381 }
3382
Sathya Perla92bf14a2013-08-27 16:57:32 +05303383 status = be_get_resources(adapter);
3384 if (status)
3385 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003386
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05303387 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3388 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303389 if (!adapter->pmac_id)
3390 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003391
Sathya Perla92bf14a2013-08-27 16:57:32 +05303392 /* Sanitize cfg_num_qs based on HW and platform limits */
3393 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3394
3395 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003396}
3397
Sathya Perla95046b92013-07-23 15:25:02 +05303398static int be_mac_setup(struct be_adapter *adapter)
3399{
3400 u8 mac[ETH_ALEN];
3401 int status;
3402
3403 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3404 status = be_cmd_get_perm_mac(adapter, mac);
3405 if (status)
3406 return status;
3407
3408 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3409 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3410 } else {
3411 /* Maybe the HW was reset; dev_addr must be re-programmed */
3412 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3413 }
3414
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003415 /* For BE3-R VFs, the PF programs the initial MAC address */
3416 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3417 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3418 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303419 return 0;
3420}
3421
/* Schedule the periodic worker (adapter->work) to run in 1 second and
 * record that it is scheduled via BE_FLAGS_WORKER_SCHEDULED, so paths
 * like be_update_queues() can later cancel it (be_cancel_worker).
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3427
Sathya Perla77071332013-08-27 16:57:34 +05303428static int be_setup_queues(struct be_adapter *adapter)
3429{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303430 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303431 int status;
3432
3433 status = be_evt_queues_create(adapter);
3434 if (status)
3435 goto err;
3436
3437 status = be_tx_qs_create(adapter);
3438 if (status)
3439 goto err;
3440
3441 status = be_rx_cqs_create(adapter);
3442 if (status)
3443 goto err;
3444
3445 status = be_mcc_queues_create(adapter);
3446 if (status)
3447 goto err;
3448
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303449 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3450 if (status)
3451 goto err;
3452
3453 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3454 if (status)
3455 goto err;
3456
Sathya Perla77071332013-08-27 16:57:34 +05303457 return 0;
3458err:
3459 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3460 return status;
3461}
3462
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303463int be_update_queues(struct be_adapter *adapter)
3464{
3465 struct net_device *netdev = adapter->netdev;
3466 int status;
3467
3468 if (netif_running(netdev))
3469 be_close(netdev);
3470
3471 be_cancel_worker(adapter);
3472
3473 /* If any vectors have been shared with RoCE we cannot re-program
3474 * the MSIx table.
3475 */
3476 if (!adapter->num_msix_roce_vec)
3477 be_msix_disable(adapter);
3478
3479 be_clear_queues(adapter);
3480
3481 if (!msix_enabled(adapter)) {
3482 status = be_msix_enable(adapter);
3483 if (status)
3484 return status;
3485 }
3486
3487 status = be_setup_queues(adapter);
3488 if (status)
3489 return status;
3490
3491 be_schedule_worker(adapter);
3492
3493 if (netif_running(netdev))
3494 status = be_open(netdev);
3495
3496 return status;
3497}
3498
/* Bring the adapter up: query resources, enable MSI-x, create the HW
 * interface and queues, program the MAC, apply VLAN/RX-mode/flow-control
 * settings and optionally set up SR-IOV VFs.
 * On any fatal failure, everything done so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	/* Native mode is only requested on non-Lancer chips */
	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Build the wanted i/f flags, adding RSS only when the function
	 * supports it, then mask down to what the i/f capabilities allow.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Warn on BE2 FW older than major version 4 */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program VLAN filters and RX mode (HW state may be fresh) */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Sync HW flow-control settings with the driver's desired values */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* VF setup failure is not fatal to PF setup (no status check) */
	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3584
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: emulate interrupt processing by issuing an EQ
 * notify and scheduling NAPI on every event queue.
 * (Dropped the redundant bare `return;` at the end of this void fn.)
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3600
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* 32-byte flash-section cookie split into two 16-byte halves; compared
 * as a whole against flash_section_info::cookie via memcmp() in
 * get_fsec_info(). The second half is exactly 16 chars, so it is
 * deliberately not NUL-terminated (legal for a char[16] initializer).
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003603
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003604static bool be_flash_redboot(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303605 const u8 *p, u32 img_start, int image_size,
3606 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003607{
3608 u32 crc_offset;
3609 u8 flashed_crc[4];
3610 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003611
3612 crc_offset = hdr_size + img_start + image_size - 4;
3613
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003614 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003615
Sathya Perla748b5392014-05-09 13:29:13 +05303616 status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003617 if (status) {
3618 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303619 "could not get crc from flash, not flashing redboot\n");
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003620 return false;
3621 }
3622
3623 /*update redboot only if crc does not match*/
3624 if (!memcmp(flashed_crc, p, 4))
3625 return false;
3626 else
3627 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003628}
3629
Sathya Perla306f1342011-08-02 19:57:45 +00003630static bool phy_flashing_required(struct be_adapter *adapter)
3631{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003632 return (adapter->phy.phy_type == TN_8022 &&
3633 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003634}
3635
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003636static bool is_comp_in_ufi(struct be_adapter *adapter,
3637 struct flash_section_info *fsec, int type)
3638{
3639 int i = 0, img_type = 0;
3640 struct flash_section_info_g2 *fsec_g2 = NULL;
3641
Sathya Perlaca34fe32012-11-06 17:48:56 +00003642 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003643 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3644
3645 for (i = 0; i < MAX_FLASH_COMP; i++) {
3646 if (fsec_g2)
3647 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3648 else
3649 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3650
3651 if (img_type == type)
3652 return true;
3653 }
3654 return false;
3655
3656}
3657
Jingoo Han4188e7d2013-08-05 18:02:02 +09003658static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303659 int header_size,
3660 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003661{
3662 struct flash_section_info *fsec = NULL;
3663 const u8 *p = fw->data;
3664
3665 p += header_size;
3666 while (p < (fw->data + fw->size)) {
3667 fsec = (struct flash_section_info *)p;
3668 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3669 return fsec;
3670 p += 32;
3671 }
3672 return NULL;
3673}
3674
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003675static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303676 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003677{
3678 u32 total_bytes = 0, flash_op, num_bytes = 0;
3679 int status = 0;
3680 struct be_cmd_write_flashrom *req = flash_cmd->va;
3681
3682 total_bytes = img_size;
3683 while (total_bytes) {
3684 num_bytes = min_t(u32, 32*1024, total_bytes);
3685
3686 total_bytes -= num_bytes;
3687
3688 if (!total_bytes) {
3689 if (optype == OPTYPE_PHY_FW)
3690 flash_op = FLASHROM_OPER_PHY_FLASH;
3691 else
3692 flash_op = FLASHROM_OPER_FLASH;
3693 } else {
3694 if (optype == OPTYPE_PHY_FW)
3695 flash_op = FLASHROM_OPER_PHY_SAVE;
3696 else
3697 flash_op = FLASHROM_OPER_SAVE;
3698 }
3699
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003700 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003701 img += num_bytes;
3702 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303703 flash_op, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003704 if (status) {
3705 if (status == ILLEGAL_IOCTL_REQ &&
3706 optype == OPTYPE_PHY_FW)
3707 break;
3708 dev_err(&adapter->pdev->dev,
3709 "cmd to write to flash rom failed.\n");
3710 return status;
3711 }
3712 }
3713 return 0;
3714}
3715
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003716/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003717static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303718 const struct firmware *fw,
3719 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003720{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003721 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003722 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003723 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003724 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003725 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003726 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003727
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003728 struct flash_comp gen3_flash_types[] = {
3729 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3730 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3731 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3732 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3733 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3734 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3735 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3736 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3737 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3738 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3739 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3740 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3741 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3742 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3743 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3744 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3745 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3746 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3747 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3748 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003749 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003750
3751 struct flash_comp gen2_flash_types[] = {
3752 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3753 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3754 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3755 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3756 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3757 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3758 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3759 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3760 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3761 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3762 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3763 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3764 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3765 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3766 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3767 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003768 };
3769
Sathya Perlaca34fe32012-11-06 17:48:56 +00003770 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003771 pflashcomp = gen3_flash_types;
3772 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003773 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003774 } else {
3775 pflashcomp = gen2_flash_types;
3776 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003777 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003778 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003779
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003780 /* Get flash section info*/
3781 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3782 if (!fsec) {
3783 dev_err(&adapter->pdev->dev,
3784 "Invalid Cookie. UFI corrupted ?\n");
3785 return -1;
3786 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003787 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003788 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003789 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003790
3791 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3792 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3793 continue;
3794
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003795 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3796 !phy_flashing_required(adapter))
3797 continue;
3798
3799 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3800 redboot = be_flash_redboot(adapter, fw->data,
Sathya Perla748b5392014-05-09 13:29:13 +05303801 pflashcomp[i].offset,
3802 pflashcomp[i].size,
3803 filehdr_size +
3804 img_hdrs_size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003805 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003806 continue;
3807 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003808
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003809 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003810 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003811 if (p + pflashcomp[i].size > fw->data + fw->size)
3812 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003813
3814 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303815 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003816 if (status) {
3817 dev_err(&adapter->pdev->dev,
3818 "Flashing section type %d failed.\n",
3819 pflashcomp[i].img_type);
3820 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003821 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003822 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003823 return 0;
3824}
3825
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003826static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303827 const struct firmware *fw,
3828 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003829{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003830 int status = 0, i, filehdr_size = 0;
3831 int img_offset, img_size, img_optype, redboot;
3832 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3833 const u8 *p = fw->data;
3834 struct flash_section_info *fsec = NULL;
3835
3836 filehdr_size = sizeof(struct flash_file_hdr_g3);
3837 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3838 if (!fsec) {
3839 dev_err(&adapter->pdev->dev,
3840 "Invalid Cookie. UFI corrupted ?\n");
3841 return -1;
3842 }
3843
3844 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3845 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3846 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3847
3848 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3849 case IMAGE_FIRMWARE_iSCSI:
3850 img_optype = OPTYPE_ISCSI_ACTIVE;
3851 break;
3852 case IMAGE_BOOT_CODE:
3853 img_optype = OPTYPE_REDBOOT;
3854 break;
3855 case IMAGE_OPTION_ROM_ISCSI:
3856 img_optype = OPTYPE_BIOS;
3857 break;
3858 case IMAGE_OPTION_ROM_PXE:
3859 img_optype = OPTYPE_PXE_BIOS;
3860 break;
3861 case IMAGE_OPTION_ROM_FCoE:
3862 img_optype = OPTYPE_FCOE_BIOS;
3863 break;
3864 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3865 img_optype = OPTYPE_ISCSI_BACKUP;
3866 break;
3867 case IMAGE_NCSI:
3868 img_optype = OPTYPE_NCSI_FW;
3869 break;
3870 default:
3871 continue;
3872 }
3873
3874 if (img_optype == OPTYPE_REDBOOT) {
3875 redboot = be_flash_redboot(adapter, fw->data,
Sathya Perla748b5392014-05-09 13:29:13 +05303876 img_offset, img_size,
3877 filehdr_size +
3878 img_hdrs_size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003879 if (!redboot)
3880 continue;
3881 }
3882
3883 p = fw->data;
3884 p += filehdr_size + img_offset + img_hdrs_size;
3885 if (p + img_size > fw->data + fw->size)
3886 return -1;
3887
3888 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3889 if (status) {
3890 dev_err(&adapter->pdev->dev,
3891 "Flashing section type %d failed.\n",
3892 fsec->fsec_entry[i].type);
3893 return status;
3894 }
3895 }
3896 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003897}
3898
/* Download a firmware image to a Lancer chip.
 * The image is streamed to the "/prg" flash object in 32KB chunks through a
 * single reusable DMA buffer, then committed with a zero-length write.  The
 * commit reports (via change_status) whether the adapter must be reset here
 * to activate the new firmware, or whether a full system reboot is needed.
 * Returns 0 on success or a negative errno / command status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command transfers the image in 4-byte words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the command header followed by one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* The device may consume fewer bytes than sent; advance by
		 * the amount it actually accepted.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* New FW only becomes active after a function-level reset */
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3995
Sathya Perlaca34fe32012-11-06 17:48:56 +00003996#define UFI_TYPE2 2
3997#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003998#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003999#define UFI_TYPE4 4
4000static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004001 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004002{
4003 if (fhdr == NULL)
4004 goto be_get_ufi_exit;
4005
Sathya Perlaca34fe32012-11-06 17:48:56 +00004006 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4007 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004008 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4009 if (fhdr->asic_type_rev == 0x10)
4010 return UFI_TYPE3R;
4011 else
4012 return UFI_TYPE3;
4013 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004014 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004015
4016be_get_ufi_exit:
4017 dev_err(&adapter->pdev->dev,
4018 "UFI and Interface are not compatible for flashing\n");
4019 return -1;
4020}
4021
/* Flash a (non-Lancer) UFI firmware file.  The file header identifies the
 * UFI generation, which must be compatible with the adapter family
 * (see be_get_ufi_type()); each image section whose imageid is 1 is then
 * flashed with the family-specific helper (Skyhawk vs BEx).
 * Returns 0 on success, -ENOMEM on allocation failure, or -1 / command
 * status on flashing errors.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer shared by all flashrom write commands issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Image headers follow the file header back-to-back */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Legacy TYPE2 (BE2) files carry no per-image headers to iterate */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4090
/* Entry point for firmware flashing: fetch @fw_file from userspace via
 * request_firmware() and dispatch to the Lancer or BEx/Skyhawk download
 * path.  Refuses to run when the interface is down.  On a successful
 * flash, re-reads the firmware version strings into the adapter.
 * Returns 0 on success or a negative/driver error code.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	/* On request_firmware() failure fw is NULL; release is then a no-op */
	release_firmware(fw);
	return status;
}
4121
Sathya Perla748b5392014-05-09 13:29:13 +05304122static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004123{
4124 struct be_adapter *adapter = netdev_priv(dev);
4125 struct nlattr *attr, *br_spec;
4126 int rem;
4127 int status = 0;
4128 u16 mode = 0;
4129
4130 if (!sriov_enabled(adapter))
4131 return -EOPNOTSUPP;
4132
4133 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4134
4135 nla_for_each_nested(attr, br_spec, rem) {
4136 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4137 continue;
4138
4139 mode = nla_get_u16(attr);
4140 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4141 return -EINVAL;
4142
4143 status = be_cmd_set_hsw_config(adapter, 0, 0,
4144 adapter->if_handle,
4145 mode == BRIDGE_MODE_VEPA ?
4146 PORT_FWD_TYPE_VEPA :
4147 PORT_FWD_TYPE_VEB);
4148 if (status)
4149 goto err;
4150
4151 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4152 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4153
4154 return status;
4155 }
4156err:
4157 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4158 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4159
4160 return status;
4161}
4162
4163static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304164 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004165{
4166 struct be_adapter *adapter = netdev_priv(dev);
4167 int status = 0;
4168 u8 hsw_mode;
4169
4170 if (!sriov_enabled(adapter))
4171 return 0;
4172
4173 /* BE and Lancer chips support VEB mode only */
4174 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4175 hsw_mode = PORT_FWD_TYPE_VEB;
4176 } else {
4177 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4178 adapter->if_handle, &hsw_mode);
4179 if (status)
4180 return 0;
4181 }
4182
4183 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4184 hsw_mode == PORT_FWD_TYPE_VEPA ?
4185 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4186}
4187
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304188#ifdef CONFIG_BE2NET_VXLAN
/* ndo_add_vxlan_port() handler: enable VxLAN offloads on the given UDP
 * port.  Only acts on Skyhawk-family chips (Lancer and BEx return early),
 * and only one offloaded UDP port is supported at a time.  The interface
 * is first converted to tunnel mode, then the port is programmed; on any
 * command failure all VxLAN offload state is rolled back.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* A port is already offloaded; refuse a second one */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
	return;
}
4229
4230static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4231 __be16 port)
4232{
4233 struct be_adapter *adapter = netdev_priv(netdev);
4234
4235 if (lancer_chip(adapter) || BEx_chip(adapter))
4236 return;
4237
4238 if (adapter->vxlan_port != port)
4239 return;
4240
4241 be_disable_vxlan_offloads(adapter);
4242
4243 dev_info(&adapter->pdev->dev,
4244 "Disabled VxLAN offloads for UDP port %d\n",
4245 be16_to_cpu(port));
4246}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304247#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304248
/* net_device_ops callbacks for be2net; installed on the netdev in
 * be_netdev_init().  The netpoll, busy-poll and VxLAN-port entries are
 * compiled in only when the corresponding kernel config option is set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF management hooks */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4278
/* Configure netdev feature flags, offload capabilities and operations for
 * this adapter before registration.  Skyhawk additionally advertises
 * UDP-tunnel (VxLAN) segmentation offloads on the encapsulated path.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (skyhawk_chip(adapter)) {
		/* Offloads applied to inner (encapsulated) headers */
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX stripping/filtering are always-on, not user-toggleable */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so the full frame (with L2 header) fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4311
4312static void be_unmap_pci_bars(struct be_adapter *adapter)
4313{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004314 if (adapter->csr)
4315 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004316 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004317 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004318}
4319
/* BAR number holding the doorbell region: BAR 0 on Lancer chips and on
 * virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4327
4328static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004329{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004330 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004331 adapter->roce_db.size = 4096;
4332 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4333 db_bar(adapter));
4334 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4335 db_bar(adapter));
4336 }
Parav Pandit045508a2012-03-26 14:27:13 +00004337 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004338}
4339
/* ioremap the PCI BARs used by the driver: the CSR BAR (BAR 2, mapped only
 * on BEx physical functions) and the doorbell BAR (number chosen by
 * db_bar()).  Also records the RoCE doorbell window on Skyhawk.
 * Returns 0 on success or -ENOMEM; on failure any partial mapping is undone.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* also unmaps the CSR BAR if it was mapped above */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4362
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004363static void be_ctrl_cleanup(struct be_adapter *adapter)
4364{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004365 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004366
4367 be_unmap_pci_bars(adapter);
4368
4369 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004370 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4371 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004372
Sathya Perla5b8821b2011-08-02 19:57:44 +00004373 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004374 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004375 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4376 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004377}
4378
/* One-time control-path setup: read the SLI interface register to learn the
 * chip family and PF/VF role, map the PCI BARs, allocate the (16-byte
 * aligned) mailbox and the rx-filter DMA buffers, and initialize the locks
 * and completion used by the command paths.  On failure, everything
 * acquired so far is released in reverse order via the goto chain.
 * Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be aligned to 16 bytes */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved config space is restored on error-recovery paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4437
4438static void be_stats_cleanup(struct be_adapter *adapter)
4439{
Sathya Perla3abcded2010-10-03 22:12:27 -07004440 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004441
4442 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004443 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4444 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004445}
4446
4447static int be_stats_init(struct be_adapter *adapter)
4448{
Sathya Perla3abcded2010-10-03 22:12:27 -07004449 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004450
Sathya Perlaca34fe32012-11-06 17:48:56 +00004451 if (lancer_chip(adapter))
4452 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4453 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004454 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004455 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004456 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004457 else
4458 /* ALL non-BE ASICs */
4459 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004460
Joe Perchesede23fa82013-08-26 22:45:23 -07004461 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4462 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004463 if (cmd->va == NULL)
4464 return -1;
4465 return 0;
4466}
4467
/* PCI remove callback: tear the device down in the reverse order of probe —
 * detach RoCE, mask interrupts, stop the recovery worker, unregister the
 * netdev, clear HW resources, tell FW we are done, free stats/control
 * buffers, and finally release the PCI device.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* must complete before resources it uses are torn down below */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4498
/* Probe-time queries of adapter attributes: controller attributes, the
 * die-temperature polling interval, the FW log level (BEx only, used to
 * seed msg_enable), and the default number of RSS queues.
 * Returns 0 or the error from the controller-attributes query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4519
/* Attempt to recover a Lancer function after a HW error: wait for the chip
 * to report ready, quiesce the interface, drop all HW resources and error
 * state, then re-run setup and (if it was running) re-open the interface.
 * Returns 0 on success; -EAGAIN means FW resource provisioning is still in
 * progress and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): success is logged via dev_err — looks like it should
	 * be dev_info; confirm before changing the log level.
	 */
	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4556
/* Delayed-work handler that polls for HW errors once a second.  On a
 * Lancer HW error it detaches the netdev and runs lancer_recover_func(),
 * re-attaching on success.  The work re-arms itself unless recovery failed
 * with anything other than -EAGAIN (provisioning still pending).
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4583
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down; otherwise kicks off a stats query (if none is in
 * flight), polls die temperature on the PF at the configured interval,
 * replenishes starved RX queues, and updates EQ delay (interrupt
 * moderation).  Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* issue a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4626
Sathya Perla257a3fe2013-06-14 15:54:51 +05304627/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004628static bool be_reset_required(struct be_adapter *adapter)
4629{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304630 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004631}
4632
Sathya Perlad3791422012-09-28 04:39:44 +00004633static char *mc_name(struct be_adapter *adapter)
4634{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304635 char *str = ""; /* default */
4636
4637 switch (adapter->mc_type) {
4638 case UMC:
4639 str = "UMC";
4640 break;
4641 case FLEX10:
4642 str = "FLEX10";
4643 break;
4644 case vNIC1:
4645 str = "vNIC-1";
4646 break;
4647 case nPAR:
4648 str = "nPAR";
4649 break;
4650 case UFP:
4651 str = "UFP";
4652 break;
4653 case vNIC2:
4654 str = "vNIC-2";
4655 break;
4656 default:
4657 str = "";
4658 }
4659
4660 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004661}
4662
/* "PF" or "VF" depending on this function's SR-IOV role, for log output. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4667
/* PCI probe entry point: brings up one NIC function.
 * Enables the PCI device, maps BARs, allocates the net_device with its
 * private be_adapter, syncs with FW, optionally FLRs the function, creates
 * queues via be_setup() and finally registers the netdev.  Errors unwind
 * through the goto-cleanup ladder at the bottom in reverse order of setup.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* Allocate netdev sized for the max TX/RX queues; the actual queue
	 * count is decided later in be_setup() based on FW resources.
	 */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled only on the PF; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required()); an FLR would tear the VFs down.
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	/* Expose this function to the RoCE driver, if present */
	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4789
/* Legacy PM suspend: quiesce the function and power the device down.
 * Order matters: arm WoL first (while FW cmds still work), mask interrupts,
 * stop the recovery worker, then close/clear before cutting PCI power.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Configure wake-on-LAN in FW before the device is powered down */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Destroy queues/interrupts; they are recreated in be_resume() */
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4814
4815static int be_resume(struct pci_dev *pdev)
4816{
4817 int status = 0;
4818 struct be_adapter *adapter = pci_get_drvdata(pdev);
4819 struct net_device *netdev = adapter->netdev;
4820
4821 netif_device_detach(netdev);
4822
4823 status = pci_enable_device(pdev);
4824 if (status)
4825 return status;
4826
Yijing Wang1ca01512013-06-27 20:53:42 +08004827 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004828 pci_restore_state(pdev);
4829
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304830 status = be_fw_wait_ready(adapter);
4831 if (status)
4832 return status;
4833
Ajit Khaparded4360d62013-11-22 12:51:09 -06004834 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00004835 /* tell fw we're ready to fire cmds */
4836 status = be_cmd_fw_init(adapter);
4837 if (status)
4838 return status;
4839
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004840 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004841 if (netif_running(netdev)) {
4842 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004843 be_open(netdev);
4844 rtnl_unlock();
4845 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004846
4847 schedule_delayed_work(&adapter->func_recovery_work,
4848 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004849 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004850
Suresh Reddy76a9e082014-01-15 13:23:40 +05304851 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004852 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004853
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004854 return 0;
4855}
4856
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop periodic and recovery workers before touching the HW */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset guarantees no further DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4876
/* EEH callback: a PCI channel error was detected on this function.
 * Tears down the data path once (guarded by adapter->eeh_error so repeated
 * notifications are idempotent) and tells the EEH core whether to attempt a
 * slot reset or give up.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	/* Permanent failure: no recovery possible, disconnect the driver */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4915
/* EEH callback: the slot has been reset; re-enable the device and wait for
 * FW to become ready.  Returns RECOVERED so the EEH core proceeds to
 * be_eeh_resume(), or DISCONNECT if the device/FW cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear the AER status recorded for this device, then drop the
	 * driver's sticky error flags set in be_eeh_err_detected().
	 */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4942
/* EEH callback: final recovery step after a successful slot reset.
 * Re-initializes FW state, rebuilds queues via be_setup() and reopens the
 * interface if it was running.  On any failure it only logs an error; the
 * netdev stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic recovery worker cancelled on error detect */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4979
/* PCI error (EEH/AER) recovery callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4985
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * error-recovery handlers for all devices matched by be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4996
4997static int __init be_init_module(void)
4998{
Joe Perches8e95a202009-12-03 07:58:21 +00004999 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5000 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005001 printk(KERN_WARNING DRV_NAME
5002 " : Module param rx_frag_size must be 2048/4096/8192."
5003 " Using 2048\n");
5004 rx_frag_size = 2048;
5005 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005006
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005007 return pci_register_driver(&be_driver);
5008}
5009module_init(be_init_module);
5010
/* Module exit: unregister the PCI driver; the core then invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);