blob: a32dc4fbb73c472fa72437dc04346a45736d56e3 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Sathya Perla6b7c5b92009-03-11 23:32:03 -070042static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Decode strings for the UE (Unrecoverable Error) Status Low CSR,
 * one entry per bit position. Trailing spaces in some names are
 * intentional and preserved from the hardware documentation.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* Decode strings for the UE (Unrecoverable Error) Status High CSR,
 * one entry per bit position; reserved bits read as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700243static int be_mac_addr_set(struct net_device *netdev, void *p)
244{
245 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530246 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530248 int status;
249 u8 mac[ETH_ALEN];
250 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700251
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000252 if (!is_valid_ether_addr(addr->sa_data))
253 return -EADDRNOTAVAIL;
254
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530255 /* Proceed further only if, User provided MAC is different
256 * from active MAC
257 */
258 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259 return 0;
260
Sathya Perla5a712c12013-07-23 15:24:59 +0530261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000266 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000278 }
279
Sathya Perla5a712c12013-07-23 15:24:59 +0530280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000282 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000285 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000286 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700287
Sathya Perla5a712c12013-07-23 15:24:59 +0530288 /* The MAC change did not happen, either due to lack of privilege
289 * or PF didn't pre-provision.
290 */
dingtianhong61d23e92013-12-30 15:40:43 +0800291 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530292 status = -EPERM;
293 goto err;
294 }
295
Somnath Koture3a7ae22011-10-27 07:14:05 +0000296 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000298 return 0;
299err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530300 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700301 return status;
302}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000345 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000377 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000378 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000379 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
Sathya Perlaca34fe32012-11-06 17:48:56 +0000389static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
Ajit Khaparde61000862013-10-03 16:16:33 -0500435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530479 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500487}
488
Selvin Xavier005d5692011-05-16 07:36:35 +0000489static void populate_lancer_stats(struct be_adapter *adapter)
490{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000491
Selvin Xavier005d5692011-05-16 07:36:35 +0000492 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000494
495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000499 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000500 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000501 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505 drvs->rx_dropped_tcp_length =
506 pport_stats->rx_dropped_invalid_tcp_length;
507 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510 drvs->rx_dropped_header_too_small =
511 pport_stats->rx_dropped_header_too_small;
512 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000513 drvs->rx_address_filtered =
514 pport_stats->rx_address_filtered +
515 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000516 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000517 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000518 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000520 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 drvs->forwarded_packets = pport_stats->num_forwards_lo;
522 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000523 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000525}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
Sathya Perlaab1594e2011-07-25 19:10:15 +0000578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530579 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000581 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000582 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700583 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000584 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000585 u64 pkts, bytes;
586 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700587 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700588
Sathya Perla3abcded2010-10-03 22:12:27 -0700589 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000590 const struct be_rx_stats *rx_stats = rx_stats(rxo);
591 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700592 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000593 pkts = rx_stats(rxo)->rx_pkts;
594 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700595 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000596 stats->rx_packets += pkts;
597 stats->rx_bytes += bytes;
598 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
599 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
600 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700601 }
602
Sathya Perla3c8def92011-06-12 20:01:58 +0000603 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000604 const struct be_tx_stats *tx_stats = tx_stats(txo);
605 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700606 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000607 pkts = tx_stats(txo)->tx_pkts;
608 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700609 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000610 stats->tx_packets += pkts;
611 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000612 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613
614 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000616 drvs->rx_alignment_symbol_errors +
617 drvs->rx_in_range_errors +
618 drvs->rx_out_range_errors +
619 drvs->rx_frame_too_long +
620 drvs->rx_dropped_too_small +
621 drvs->rx_dropped_too_short +
622 drvs->rx_dropped_header_too_small +
623 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000624 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700625
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000627 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000630
Sathya Perlaab1594e2011-07-25 19:10:15 +0000631 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
633 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000634 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000635
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636 /* receiver fifo overrun */
637 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000638 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000639 drvs->rx_input_fifo_overflow_drop +
640 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000641 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Populate the per-packet TX header WRB that precedes the data WRBs.
 * Encodes the requested offloads (LSO, TCP/UDP csum, HW VLAN insertion)
 * plus the total WRB count and payload length for the hardware.
 * @skip_hw_vlan: when true, program evt=1/compl=0 so the FW skips HW
 *                VLAN insertion (errata workaround).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not need the lso6 bit for IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunneled pkt: also request inner IP csum offload */
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* Build the WRB chain for @skb in @txq: one hdr WRB, one WRB for the
 * linear head (if non-empty), one per page fragment, and an optional
 * zero-length dummy WRB for even alignment.
 * Returns the number of payload bytes DMA-mapped, or 0 on mapping
 * failure, in which case all mappings made so far are undone and the
 * queue head is restored to its pre-call position (minus the hdr slot,
 * which the caller rewinds via its saved head).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the hdr WRB; filled in at the end once
	 * the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB only to keep the WRB count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: walk the WRBs written so far and unmap each buffer */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first mapping may have been dma_map_single */
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert VLAN tag(s) directly into the packet payload (SW tagging),
 * used when HW VLAN insertion must be avoided due to chip errata.
 * May set *skip_hw_vlan to tell the FW to skip its own VLAN insertion.
 * Returns the (possibly reallocated) skb, or NULL if reallocation
 * failed; on NULL the original skb has already been consumed.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* the payload is about to be modified; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the payload; clear the out-of-band TCI */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply BEx/Lancer TX errata workarounds to @skb.
 * Returns the (possibly modified/reallocated) skb, or NULL when the
 * packet was dropped or an skb reallocation failed; in the NULL case
 * the skb has already been freed/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * So trim the pad off short IPv4 packets before handing them to HW.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit entry point: apply errata workarounds, build the WRB
 * chain, stop the subqueue *before* ringing the doorbell when it is
 * about to fill, then notify HW. Always returns NETDEV_TX_OK; dropped
 * packets are freed here and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* saved for rollback and sent_skb slot */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
1090/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001091 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1092 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093 */
/* Push the driver's VLAN bitmap (adapter->vids) to HW.
 * Falls back to VLAN promiscuous mode when more VLANs are configured
 * than the function supports, or when FW reports insufficient
 * resources; re-disables VLAN promisc once filtering succeeds again.
 * Returns 0 on success or a FW status code.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
Patrick McHardy80d5c362013-04-19 02:04:28 +00001148static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001151 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001153 /* Packets with VID 0 are always received by Lancer by default */
1154 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301155 return status;
1156
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301157 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301158 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001159
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301160 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301161 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001162
Somnath Kotura6b74e02014-01-21 15:50:55 +05301163 status = be_vid_config(adapter);
1164 if (status) {
1165 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301166 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301167 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301168
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001169 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170}
1171
Patrick McHardy80d5c362013-04-19 02:04:28 +00001172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001175 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001177 /* Packets with VID 0 are always received by Lancer by default */
1178 if (lancer_chip(adapter) && vid == 0)
1179 goto ret;
1180
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301181 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301182 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001183 if (!status)
1184 adapter->vlans_added--;
1185 else
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301186 set_bit(vid, adapter->vids);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001187ret:
1188 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189}
1190
/* Exit promiscuous mode: reset the sw promisc state (including the
 * VLAN/MCAST promisc flags implied by full promisc) and tell FW to
 * turn IFF_PROMISC off.
 */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
1198
/* ndo_set_rx_mode: sync the netdev's rx filter flags and UC/MC address
 * lists to HW. Escalates to full promisc when requested or when the UC
 * list exceeds HW capacity, and to MCAST promisc when IFF_ALLMULTI is
 * set, the MC list exceeds HW capacity, or programming the MC list
 * fails. Called with rtnl/addr_list_lock protection by the stack.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* restore the VLAN filter table dropped while in promisc */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* flush the previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC addrs for HW filtering: go fully promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed fine; leave MCAST promisc if it was on */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1265
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001266static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1267{
1268 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001269 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001270 int status;
1271
Sathya Perla11ac75e2011-12-13 00:58:50 +00001272 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001273 return -EPERM;
1274
Sathya Perla11ac75e2011-12-13 00:58:50 +00001275 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001276 return -EINVAL;
1277
Sathya Perla3175d8c2013-07-23 15:25:03 +05301278 if (BEx_chip(adapter)) {
1279 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1280 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001281
Sathya Perla11ac75e2011-12-13 00:58:50 +00001282 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1283 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301284 } else {
1285 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1286 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001287 }
1288
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001289 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001290 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301291 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001292 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001293 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001294
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001295 return status;
1296}
1297
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001298static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301299 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001300{
1301 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001302 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001303
Sathya Perla11ac75e2011-12-13 00:58:50 +00001304 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001305 return -EPERM;
1306
Sathya Perla11ac75e2011-12-13 00:58:50 +00001307 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001308 return -EINVAL;
1309
1310 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001311 vi->max_tx_rate = vf_cfg->tx_rate;
1312 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001313 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1314 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001315 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301316 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001317
1318 return 0;
1319}
1320
Sathya Perla748b5392014-05-09 13:29:13 +05301321static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001322{
1323 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001324 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001325 int status = 0;
1326
Sathya Perla11ac75e2011-12-13 00:58:50 +00001327 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001328 return -EPERM;
1329
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001330 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001331 return -EINVAL;
1332
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001333 if (vlan || qos) {
1334 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301335 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001336 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1337 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001338 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001339 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301340 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1341 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001342 }
1343
Somnath Koturc5022242014-03-03 14:24:20 +05301344 if (!status)
1345 vf_cfg->vlan_tag = vlan;
1346 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001347 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301348 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001349 return status;
1350}
1351
/* ndo_set_vf_rate handler: apply a TX rate-limit of @max_tx_rate Mbps to
 * VF @vf.  @min_tx_rate (minimum-rate guarantees) is not supported and
 * must be 0.  @max_tx_rate == 0 clears any existing limit.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* rate 0 removes the limit; no link-speed validation needed */
	if (!max_tx_rate)
		goto config_qos;

	/* the requested rate is validated against the current link speed,
	 * so a limit can only be set while the link is up
	 */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -EPERM;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* cache the accepted limit so be_get_vf_config() can report it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return status;
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301413static int be_set_vf_link_state(struct net_device *netdev, int vf,
1414 int link_state)
1415{
1416 struct be_adapter *adapter = netdev_priv(netdev);
1417 int status;
1418
1419 if (!sriov_enabled(adapter))
1420 return -EPERM;
1421
1422 if (vf >= adapter->num_vfs)
1423 return -EINVAL;
1424
1425 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1426 if (!status)
1427 adapter->vf_cfg[vf].plink_tracking = link_state;
1428
1429 return status;
1430}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001431
Sathya Perla2632baf2013-10-01 16:00:00 +05301432static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1433 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434{
Sathya Perla2632baf2013-10-01 16:00:00 +05301435 aic->rx_pkts_prev = rx_pkts;
1436 aic->tx_reqs_prev = tx_pkts;
1437 aic->jiffies = now;
1438}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001439
Sathya Perla2632baf2013-10-01 16:00:00 +05301440static void be_eqd_update(struct be_adapter *adapter)
1441{
1442 struct be_set_eqd set_eqd[MAX_EVT_QS];
1443 int eqd, i, num = 0, start;
1444 struct be_aic_obj *aic;
1445 struct be_eq_obj *eqo;
1446 struct be_rx_obj *rxo;
1447 struct be_tx_obj *txo;
1448 u64 rx_pkts, tx_pkts;
1449 ulong now;
1450 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001451
Sathya Perla2632baf2013-10-01 16:00:00 +05301452 for_all_evt_queues(adapter, eqo, i) {
1453 aic = &adapter->aic_obj[eqo->idx];
1454 if (!aic->enable) {
1455 if (aic->jiffies)
1456 aic->jiffies = 0;
1457 eqd = aic->et_eqd;
1458 goto modify_eqd;
1459 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460
Sathya Perla2632baf2013-10-01 16:00:00 +05301461 rxo = &adapter->rx_obj[eqo->idx];
1462 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001463 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301464 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001465 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001466
Sathya Perla2632baf2013-10-01 16:00:00 +05301467 txo = &adapter->tx_obj[eqo->idx];
1468 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001469 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301470 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001471 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001472
Sathya Perla4097f662009-03-24 16:40:13 -07001473
Sathya Perla2632baf2013-10-01 16:00:00 +05301474 /* Skip, if wrapped around or first calculation */
1475 now = jiffies;
1476 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1477 rx_pkts < aic->rx_pkts_prev ||
1478 tx_pkts < aic->tx_reqs_prev) {
1479 be_aic_update(aic, rx_pkts, tx_pkts, now);
1480 continue;
1481 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001482
Sathya Perla2632baf2013-10-01 16:00:00 +05301483 delta = jiffies_to_msecs(now - aic->jiffies);
1484 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1485 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1486 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001487
Sathya Perla2632baf2013-10-01 16:00:00 +05301488 if (eqd < 8)
1489 eqd = 0;
1490 eqd = min_t(u32, eqd, aic->max_eqd);
1491 eqd = max_t(u32, eqd, aic->min_eqd);
1492
1493 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001494modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301495 if (eqd != aic->prev_eqd) {
1496 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1497 set_eqd[num].eq_id = eqo->q.id;
1498 aic->prev_eqd = eqd;
1499 num++;
1500 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001501 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301502
1503 if (num)
1504 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001505}
1506
Sathya Perla3abcded2010-10-03 22:12:27 -07001507static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301508 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001509{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001510 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001511
Sathya Perlaab1594e2011-07-25 19:10:15 +00001512 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001513 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001514 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001515 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001516 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001517 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001518 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001519 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001520 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521}
1522
Sathya Perla2e588f82011-03-11 02:49:26 +00001523static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001524{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001525 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301526 * Also ignore ipcksm for ipv6 pkts
1527 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001528 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301529 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001530}
1531
/* Consume the page fragment at the tail of the Rx queue and return its
 * page_info descriptor.
 *
 * If this fragment is the last one carved from its backing page, the whole
 * page is DMA-unmapped; otherwise only this fragment is synced for CPU
 * access (the mapping stays alive for the page's remaining fragments).
 * Advances the queue tail and decrements the used count, so each posted
 * fragment must be consumed exactly once.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* last fragment of the page: tear down the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* page still has posted fragments: sync just this one */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1557
1558/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001559static void be_rx_compl_discard(struct be_rx_obj *rxo,
1560 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001562 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001563 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001564
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001565 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301566 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001567 put_page(page_info->page);
1568 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001569 }
1570}
1571
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is handled specially: a tiny packet (<= BE_HDR_LEN)
 * is copied entirely into the skb's linear area; otherwise only the
 * ethernet header is copied and the rest attached as page frag 0.
 * Remaining fragments are attached as page frags, with consecutive
 * fragments from the same physical page coalesced into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the header; hand the payload over as frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* ownership of the page moved to the skb (or was dropped above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1646
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb available: count the drop and reclaim rx buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* trust the HW checksum only when the netdev has RXCSUM enabled
	 * and the completion reports a valid TCP/UDP checksum
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1682
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no skb from the napi cache: drop and reclaim rx buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* attach all fragments as page frags; j starts at -1 so the very
	 * first fragment always opens frags[0]
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1740
/* Decode a v1 (BE3 native mode) Rx completion descriptor into the
 * HW-version-independent be_rx_compl_info used by the Rx path.
 * The vlan fields (qnq/vlan_tag) are extracted only when the vtp
 * (vlan-present) bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* only the v1 format reports inner/outer tunneling */
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001772
/* Decode a v0 (legacy) Rx completion descriptor into the
 * HW-version-independent be_rx_compl_info.  Unlike the v1 format, this
 * one carries an ip_frag bit, which be_rx_compl_get() uses to ignore the
 * L4 checksum result for IP fragments.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		/* vlan fields are valid only when the vtp bit is set */
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1804
/* Return the next valid Rx completion from rxo's completion queue, parsed
 * into rxo->rxcp, or NULL if none is pending.  Applies vlan fixups (QnQ,
 * byte order, pvid/vid filtering) and ip-fragment checksum masking before
 * handing the completion to the caller.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the rest of the descriptor only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* pick the parser matching the HW's completion format */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not meaningful for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* hide the pvid tag unless the host explicitly added it */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1849
Eric Dumazet1829b082011-03-01 05:48:12 +00001850static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001853
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001855 gfp |= __GFP_COMP;
1856 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857}
1858
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.  Stops early when the RXQ is full, when page
 * allocation fails, or when DMA mapping fails.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it once; frags are
			 * carved out of it below
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the same page: take an extra ref so
			 * each frag owns a reference on the page
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* The last frag of a page stores the page's dma addr
			 * (used when unmapping the whole page)
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1937
/* Fetch the next TX completion from @tx_cq or NULL when none is pending.
 * The entry is consumed: its valid bit is cleared and the CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not seen again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1953
/* Unmap and free the sent skb whose wrbs end at @last_index in txo->q.
 * Returns the number of wrbs reclaimed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is unmapped only with the first
		 * data wrb (hence unmap_skb_hdr is cleared after one pass)
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1985
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001986/* Return the number of events in the event queue */
1987static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001988{
1989 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001990 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001991
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001992 do {
1993 eqe = queue_tail_node(&eqo->q);
1994 if (eqe->evt == 0)
1995 break;
1996
1997 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001998 eqe->evt = 0;
1999 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002000 queue_tail_inc(&eqo->q);
2001 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002002
2003 return num;
2004}
2005
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002006/* Leaves the EQ is disarmed state */
2007static void be_eq_clean(struct be_eq_obj *eqo)
2008{
2009 int num = events_get(eqo);
2010
2011 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2012}
2013
/* Drain the RX CQ and then free all posted but unused RX buffers.
 * The CQ is left in unarmed state and the RXQ head/tail reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10 iterations or on HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2062
/* Reap outstanding TX completions on all TXQs during teardown, then free
 * any posted skbs for which completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW still producing compls; restart timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2122
/* Destroy all event queues: drain pending events, destroy the HW queue,
 * tear down the napi context and free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Memory is freed even if the HW queue was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2138
/* Allocate and create the event queues along with their napi contexts
 * and adaptive interrupt coalescing (aic) state.
 * Returns 0 on success or the first alloc/cmd error.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* Number of EQs is bounded by available irqs and configured queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2172
/* Destroy the MCC queue and its completion queue; each HW queue is
 * destroyed only if it was actually created, memory is always freed.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
2187
/* Create the MCC queue and its completion queue.
 * Must be called only after TX qs are created as MCC shares TX EQ.
 * Returns 0 on success, -1 on any failure (partially created queues are
 * unwound via the goto chain below).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2220
/* Destroy every TX queue and its completion queue and free their memory */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2239
/* Allocate and create the TX queues and their completion queues.
 * Returns 0 on success or the first alloc/cmd error.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2280
/* Destroy all RX completion queues and free their memory */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2294
/* Allocate and create the RX completion queues, one per RSS ring plus a
 * default RXQ when RSS is used.  Returns 0 or the first alloc/cmd error.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs are spread over the available EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2331
/* INTx interrupt handler: counts pending events and hands processing
 * off to napi; tracks spurious interrupts so the kernel does not
 * disable the line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2363
/* MSI-x interrupt handler: notify the EQ (without re-arming) and
 * schedule napi to do the actual event processing.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2372
Sathya Perla2e588f82011-03-11 02:49:26 +00002373static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002374{
Somnath Koture38b1702013-05-29 22:55:56 +00002375 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002376}
2377
/* Process up to @budget RX completions from @rxo.
 * @polling distinguishes the caller context (NAPI_POLLING vs
 * BUSY_POLLING); GRO is skipped while busy-polling.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2433
/* Reap up to @budget TX completions from @txo (netdev queue index @idx).
 * Returns true when fewer than @budget compls were found, i.e. TX work
 * for this queue is done.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002467
/* napi poll handler: services all TXQs and RXQs mapped to this EQ and
 * MCC compls when this is the MCC EQ.  The EQ is re-armed only when all
 * work fit within @budget; otherwise events are counted and cleared and
 * polling continues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll holds the lock; ask to be polled again */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2512
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll RX handler: drains a few frames from the first
 * RX queue on this EQ that has completions pending.  Returns the number
 * of frames processed, or LL_FLUSH_BUSY when NAPI owns the EQ.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int rx_work = 0;
	int i;

	/* The regular NAPI path currently holds this EQ; tell the caller */
	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* Small fixed budget of 4 frames keeps poll latency low */
		rx_work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (rx_work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return rx_work;
}
#endif
2534
/* Probe the adapter for unrecoverable hardware/firmware errors.
 * Lancer chips report errors through SLIPORT registers; other chips expose
 * UE (Unrecoverable Error) status bits in PCI config space.  On a real
 * error the carrier is turned off so the stack stops using the device.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		/* Read raw UE status and the mask of bits to be ignored */
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Keep only the unmasked (meaningful) UE bits */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2610
Sathya Perla8d56ff12009-11-22 22:02:26 +00002611static void be_msix_disable(struct be_adapter *adapter)
2612{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002613 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002614 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002615 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302616 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002617 }
2618}
2619
/* Enable MSI-X for the adapter and split the granted vectors between the
 * NIC and (when supported) the RoCE function.
 * Returns 0 on success.  On failure, returns 0 for a PF (so probe can fall
 * back to INTx) but a negative error for a VF, since VFs have no INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors when more than the minimum
	 * was obtained; the NIC keeps the remainder.
	 */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2663
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002664static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302665 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002666{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302667 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002668}
2669
2670static int be_msix_register(struct be_adapter *adapter)
2671{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002672 struct net_device *netdev = adapter->netdev;
2673 struct be_eq_obj *eqo;
2674 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002675
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002676 for_all_evt_queues(adapter, eqo, i) {
2677 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2678 vec = be_msix_vec_get(adapter, eqo);
2679 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002680 if (status)
2681 goto err_msix;
2682 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002683
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002684 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002685err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002686 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2687 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2688 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05302689 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002690 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002691 return status;
2692}
2693
2694static int be_irq_register(struct be_adapter *adapter)
2695{
2696 struct net_device *netdev = adapter->netdev;
2697 int status;
2698
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002699 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002700 status = be_msix_register(adapter);
2701 if (status == 0)
2702 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002703 /* INTx is not supported for VF */
2704 if (!be_physfn(adapter))
2705 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002706 }
2707
Sathya Perlae49cc342012-11-27 19:50:02 +00002708 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002709 netdev->irq = adapter->pdev->irq;
2710 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002711 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002712 if (status) {
2713 dev_err(&adapter->pdev->dev,
2714 "INTx request IRQ failed - err %d\n", status);
2715 return status;
2716 }
2717done:
2718 adapter->isr_registered = true;
2719 return 0;
2720}
2721
2722static void be_irq_unregister(struct be_adapter *adapter)
2723{
2724 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002725 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002726 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002727
2728 if (!adapter->isr_registered)
2729 return;
2730
2731 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002732 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002733 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002734 goto done;
2735 }
2736
2737 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002738 for_all_evt_queues(adapter, eqo, i)
2739 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002740
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002741done:
2742 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002743}
2744
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002745static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002746{
2747 struct be_queue_info *q;
2748 struct be_rx_obj *rxo;
2749 int i;
2750
2751 for_all_rx_queues(adapter, rxo, i) {
2752 q = &rxo->q;
2753 if (q->created) {
2754 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002755 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002756 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002757 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002758 }
2759}
2760
/* ndo_stop handler: quiesce the data path in a strict order — NAPI first,
 * then MCC events, then TX/RX queues, and finally IRQs — so no completion
 * can arrive for a resource that has already been torn down.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Remove the extra unicast MACs (index 0 is the primary MAC) */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no IRQ handler is still running, then drain each EQ */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2810
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002811static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002812{
2813 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002814 int rc, i, j;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302815 u8 rss_hkey[RSS_HASH_KEY_LEN];
2816 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00002817
2818 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002819 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2820 sizeof(struct be_eth_rx_d));
2821 if (rc)
2822 return rc;
2823 }
2824
2825 /* The FW would like the default RXQ to be created first */
2826 rxo = default_rxo(adapter);
2827 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2828 adapter->if_handle, false, &rxo->rss_id);
2829 if (rc)
2830 return rc;
2831
2832 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002833 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002834 rx_frag_size, adapter->if_handle,
2835 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002836 if (rc)
2837 return rc;
2838 }
2839
2840 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302841 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2842 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002843 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302844 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002845 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302846 rss->rsstable[j + i] = rxo->rss_id;
2847 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002848 }
2849 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05302850 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2851 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00002852
2853 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05302854 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2855 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302856 } else {
2857 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05302858 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302859 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002860
Venkata Duvvurue2557872014-04-21 15:38:00 +05302861 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05302862 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Venkata Duvvurue2557872014-04-21 15:38:00 +05302863 128, rss_hkey);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302864 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302865 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302866 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002867 }
2868
Venkata Duvvurue2557872014-04-21 15:38:00 +05302869 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2870
Sathya Perla482c9e72011-06-29 23:33:17 +00002871 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002872 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002873 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002874 return 0;
2875}
2876
/* ndo_open handler: build the RX queues, register IRQs, arm all completion
 * and event queues, enable NAPI, and start the TX queues.  On any failure
 * the partially-opened state is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the RX and TX completion queues so they raise events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI/busy-poll and arm each event queue */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report current link state; best-effort (ignore query failure) */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2926
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002927static int be_setup_wol(struct be_adapter *adapter, bool enable)
2928{
2929 struct be_dma_mem cmd;
2930 int status = 0;
2931 u8 mac[ETH_ALEN];
2932
2933 memset(mac, 0, ETH_ALEN);
2934
2935 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002936 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2937 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002938 if (cmd.va == NULL)
2939 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002940
2941 if (enable) {
2942 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302943 PCICFG_PM_CONTROL_OFFSET,
2944 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002945 if (status) {
2946 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002947 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002948 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2949 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002950 return status;
2951 }
2952 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302953 adapter->netdev->dev_addr,
2954 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002955 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2956 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2957 } else {
2958 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2959 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2960 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2961 }
2962
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002963 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002964 return status;
2965}
2966
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002967/*
2968 * Generate a seed MAC address from the PF MAC Address using jhash.
2969 * MAC Address for VFs are assigned incrementally starting from the seed.
2970 * These addresses are programmed in the ASIC by the PF and the VF driver
2971 * queries for the MAC address during its probe.
2972 */
Sathya Perla4c876612013-02-03 20:30:11 +00002973static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002974{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002975 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002976 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002977 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002978 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002979
2980 be_vf_eth_addr_generate(adapter, mac);
2981
Sathya Perla11ac75e2011-12-13 00:58:50 +00002982 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302983 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002984 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002985 vf_cfg->if_handle,
2986 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302987 else
2988 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2989 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002990
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002991 if (status)
2992 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05302993 "Mac address assignment failed for VF %d\n",
2994 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002995 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002996 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002997
2998 mac[5] += 1;
2999 }
3000 return status;
3001}
3002
Sathya Perla4c876612013-02-03 20:30:11 +00003003static int be_vfs_mac_query(struct be_adapter *adapter)
3004{
3005 int status, vf;
3006 u8 mac[ETH_ALEN];
3007 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003008
3009 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303010 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3011 mac, vf_cfg->if_handle,
3012 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003013 if (status)
3014 return status;
3015 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3016 }
3017 return 0;
3018}
3019
/* Tear down SR-IOV state: disable SR-IOV and destroy each VF's interface.
 * If any VF is still assigned to a VM, per-VF teardown is skipped entirely
 * (only the host-side bookkeeping is freed) to avoid yanking a device out
 * from under a guest.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo the MAC programming done in be_vf_eth_addr_config() */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
3047
/* Destroy all queues. NOTE(review): the order (MCC, RX CQs, TX queues,
 * event queues last) appears deliberate — EQs presumably must outlive the
 * CQs bound to them; confirm against queue-creation code before reordering.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3055
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303056static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003057{
Sathya Perla191eb752012-02-23 18:50:13 +00003058 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3059 cancel_delayed_work_sync(&adapter->work);
3060 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3061 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303062}
3063
Somnath Koturb05004a2013-12-05 12:08:16 +05303064static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303065{
3066 int i;
3067
Somnath Koturb05004a2013-12-05 12:08:16 +05303068 if (adapter->pmac_id) {
3069 for (i = 0; i < (adapter->uc_macs + 1); i++)
3070 be_cmd_pmac_del(adapter, adapter->if_handle,
3071 adapter->pmac_id[i], 0);
3072 adapter->uc_macs = 0;
3073
3074 kfree(adapter->pmac_id);
3075 adapter->pmac_id = NULL;
3076 }
3077}
3078
#ifdef CONFIG_BE2NET_VXLAN
/* Turn off VxLAN offloads: revert the interface to a normal (non-tunnel)
 * one in FW, clear the programmed VxLAN UDP port, and reset driver state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	bool offloads_on = adapter->flags & BE_FLAGS_VXLAN_OFFLOADS;

	if (offloads_on)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303093
/* Full teardown counterpart of be_setup(): stop the worker, clear VFs,
 * rebalance FW SR-IOV resources, remove offloads/MACs, destroy the
 * interface and all queues, and finally release MSI-X.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	/* Gate used by be_close() to skip teardown of an already-cleared state */
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3122
/* Create a FW interface for each VF.
 * Capability flags default to UNTAGGED|BROADCAST|MULTICAST; on non-BE3
 * chips they are replaced by the per-VF FW profile when one is readable.
 * NOTE(review): cap_flags carries over to the next iteration if a later
 * profile query fails — presumably intentional best-effort; confirm.
 * Returns 0 on success or the first if-create error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3154
Sathya Perla39f1d942012-05-08 19:41:24 +00003155static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003156{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003157 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003158 int vf;
3159
Sathya Perla39f1d942012-05-08 19:41:24 +00003160 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3161 GFP_KERNEL);
3162 if (!adapter->vf_cfg)
3163 return -ENOMEM;
3164
Sathya Perla11ac75e2011-12-13 00:58:50 +00003165 for_all_vfs(adapter, vf_cfg, vf) {
3166 vf_cfg->if_handle = -1;
3167 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003168 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003169 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003170}
3171
/* Bring up SR-IOV virtual functions: allocate per-VF state, create (or
 * re-discover) each VF's interface and MAC address, grant filtering
 * privileges, and finally enable SR-IOV on the PCI device if it is not
 * already enabled.  On any failure all VF state is torn down via
 * be_vf_clear().  Returns 0 on success or a negative error code.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	/* Non-zero when VFs were left enabled (e.g. by a previous driver
	 * instance); then existing FW objects are queried, not re-created.
	 */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Re-discover interface handles and MACs of already
		 * provisioned VFs.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh setup: create an interface and program a MAC
		 * address for every VF.
		 */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	/* Enable SR-IOV on the PCI device only after all per-VF
	 * provisioning above has succeeded.
	 */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3244
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303245/* Converting function_mode bits on BE3 to SH mc_type enums */
3246
3247static u8 be_convert_mc_type(u32 function_mode)
3248{
Suresh Reddy66064db2014-06-23 16:41:29 +05303249 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303250 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303251 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303252 return FLEX10;
3253 else if (function_mode & VNIC_MODE)
3254 return vNIC2;
3255 else if (function_mode & UMC_ENABLED)
3256 return UMC;
3257 else
3258 return MC_NONE;
3259}
3260
/* On BE2/BE3 FW does not suggest the supported limits, so derive them
 * here from chip type, multi-channel mode, SR-IOV state and the
 * function's capabilities.
 * NOTE(review): *res is expected to be zero-initialized by the caller
 * (see be_get_resources()); max_rss_qs stays 0 when RSS is unusable.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PF gets a larger unicast-MAC (pmac) quota than a VF */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for an RSS-capable PF without SR-IOV enabled */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				  BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* one additional RX queue beyond the RSS-capable ones */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3318
Sathya Perla30128032011-11-10 19:17:57 +00003319static void be_setup_init(struct be_adapter *adapter)
3320{
3321 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003322 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003323 adapter->if_handle = -1;
3324 adapter->be3_native = false;
3325 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003326 if (be_physfn(adapter))
3327 adapter->cmd_privileges = MAX_PRIVILEGES;
3328 else
3329 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003330}
3331
Vasundhara Volambec84e62014-06-30 13:01:32 +05303332static int be_get_sriov_config(struct be_adapter *adapter)
3333{
3334 struct device *dev = &adapter->pdev->dev;
3335 struct be_resources res = {0};
3336 int status, max_vfs, old_vfs;
3337
3338 status = be_cmd_get_profile_config(adapter, &res, 0);
3339 if (status)
3340 return status;
3341
3342 adapter->pool_res = res;
3343
3344 /* Some old versions of BE3 FW don't report max_vfs value */
3345 if (BE3_chip(adapter) && !res.max_vfs) {
3346 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3347 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3348 }
3349
3350 adapter->pool_res.max_vfs = res.max_vfs;
3351 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3352
3353 if (!be_max_vfs(adapter)) {
3354 if (num_vfs)
3355 dev_warn(dev, "device doesn't support SRIOV\n");
3356 adapter->num_vfs = 0;
3357 return 0;
3358 }
3359
3360 /* validate num_vfs module param */
3361 old_vfs = pci_num_vf(adapter->pdev);
3362 if (old_vfs) {
3363 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3364 if (old_vfs != num_vfs)
3365 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3366 adapter->num_vfs = old_vfs;
3367 } else {
3368 if (num_vfs > be_max_vfs(adapter)) {
3369 dev_info(dev, "Resources unavailable to init %d VFs\n",
3370 num_vfs);
3371 dev_info(dev, "Limiting to %d VFs\n",
3372 be_max_vfs(adapter));
3373 }
3374 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3375 }
3376
3377 return 0;
3378}
3379
/* Populate adapter->res with this function's resource limits.
 * BE2/BE3 limits are derived locally (FW does not report them);
 * Lancer/Skyhawk limits are queried from FW and logged.
 * Returns 0 on success or a negative error code.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3416
/* Query FW configuration (port number, function mode/caps/ASIC rev),
 * configure SR-IOV resource distribution on the PF, discover resource
 * limits, and size the pmac-id table and cfg_num_qs accordingly.
 * Returns 0 on success or a negative error code.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		/* Failure to read the active profile is not fatal; the id
		 * is reported only for information.
		 */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);

		status = be_get_sriov_config(adapter);
		if (status)
			return status;

		/* When the HW is in SRIOV capable configuration, the PF-pool
		 * resources are equally distributed across the max-number of
		 * VFs. The user may request only a subset of the max-vfs to be
		 * enabled. Based on num_vfs, redistribute the resources across
		 * num_vfs so that each VF will have access to more number of
		 * resources. This facility is not available in BE3 FW.
		 * Also, this is done by FW in Lancer chip.
		 */
		if (!pci_num_vf(adapter->pdev)) {
			status = be_cmd_set_sriov_config(adapter,
							 adapter->pool_res,
							 adapter->num_vfs);
			if (status)
				return status;
		}
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac-id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3470
Sathya Perla95046b92013-07-23 15:25:02 +05303471static int be_mac_setup(struct be_adapter *adapter)
3472{
3473 u8 mac[ETH_ALEN];
3474 int status;
3475
3476 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3477 status = be_cmd_get_perm_mac(adapter, mac);
3478 if (status)
3479 return status;
3480
3481 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3482 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3483 } else {
3484 /* Maybe the HW was reset; dev_addr must be re-programmed */
3485 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3486 }
3487
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003488 /* For BE3-R VFs, the PF programs the initial MAC address */
3489 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3490 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3491 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303492 return 0;
3493}
3494
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303495static void be_schedule_worker(struct be_adapter *adapter)
3496{
3497 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3498 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3499}
3500
/* Create all queue objects (event queues, TX queues, RX completion
 * queues and the MCC queues) and publish the actual RX/TX queue counts
 * to the network stack.
 * NOTE(review): netif_set_real_num_*_queues() requires rtnl_lock();
 * be_setup() takes it around this call - confirm other callers hold it.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Tell the stack how many RX/TX queues were actually created */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3535
/* Tear down and re-create all queues, e.g. after the desired queue or
 * vector configuration has changed.  Closes the netdev first if it is
 * running, stops the worker, and re-opens/restarts both afterwards.
 * Returns 0 on success or a negative error code.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-X only when it was actually disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3571
/* Main (re)initialization path: bring the adapter from reset to a fully
 * configured state - FW config query, MSI-X setup, interface creation,
 * queue creation, MAC/VLAN/flow-control programming, optional SR-IOV VF
 * setup - and start the periodic worker.  On any failure everything is
 * undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags that the interface's capabilities allow */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Warn about known-problematic old BE2 firmware */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program VLAN filters that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Sync FW flow-control settings with the driver's desired values */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3653
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-controller entry point: notify every event queue and schedule
 * its NAPI context so pending events get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3669
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303670static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003671
Sathya Perla306f1342011-08-02 19:57:45 +00003672static bool phy_flashing_required(struct be_adapter *adapter)
3673{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003674 return (adapter->phy.phy_type == TN_8022 &&
3675 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003676}
3677
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003678static bool is_comp_in_ufi(struct be_adapter *adapter,
3679 struct flash_section_info *fsec, int type)
3680{
3681 int i = 0, img_type = 0;
3682 struct flash_section_info_g2 *fsec_g2 = NULL;
3683
Sathya Perlaca34fe32012-11-06 17:48:56 +00003684 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003685 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3686
3687 for (i = 0; i < MAX_FLASH_COMP; i++) {
3688 if (fsec_g2)
3689 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3690 else
3691 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3692
3693 if (img_type == type)
3694 return true;
3695 }
3696 return false;
3697
3698}
3699
Jingoo Han4188e7d2013-08-05 18:02:02 +09003700static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303701 int header_size,
3702 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003703{
3704 struct flash_section_info *fsec = NULL;
3705 const u8 *p = fw->data;
3706
3707 p += header_size;
3708 while (p < (fw->data + fw->size)) {
3709 fsec = (struct flash_section_info *)p;
3710 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3711 return fsec;
3712 p += 32;
3713 }
3714 return NULL;
3715}
3716
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303717static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3718 u32 img_offset, u32 img_size, int hdr_size,
3719 u16 img_optype, bool *crc_match)
3720{
3721 u32 crc_offset;
3722 int status;
3723 u8 crc[4];
3724
3725 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3726 if (status)
3727 return status;
3728
3729 crc_offset = hdr_size + img_offset + img_size - 4;
3730
3731 /* Skip flashing, if crc of flashed region matches */
3732 if (!memcmp(crc, p + crc_offset, 4))
3733 *crc_match = true;
3734 else
3735 *crc_match = false;
3736
3737 return status;
3738}
3739
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003740static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303741 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003742{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003743 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303744 u32 total_bytes, flash_op, num_bytes;
3745 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003746
3747 total_bytes = img_size;
3748 while (total_bytes) {
3749 num_bytes = min_t(u32, 32*1024, total_bytes);
3750
3751 total_bytes -= num_bytes;
3752
3753 if (!total_bytes) {
3754 if (optype == OPTYPE_PHY_FW)
3755 flash_op = FLASHROM_OPER_PHY_FLASH;
3756 else
3757 flash_op = FLASHROM_OPER_FLASH;
3758 } else {
3759 if (optype == OPTYPE_PHY_FW)
3760 flash_op = FLASHROM_OPER_PHY_SAVE;
3761 else
3762 flash_op = FLASHROM_OPER_SAVE;
3763 }
3764
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003765 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003766 img += num_bytes;
3767 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303768 flash_op, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05303769 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303770 optype == OPTYPE_PHY_FW)
3771 break;
3772 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003773 return status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003774 }
3775 return 0;
3776}
3777
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003778/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003779static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303780 const struct firmware *fw,
3781 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003782{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003783 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303784 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003785 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303786 int status, i, filehdr_size, num_comp;
3787 const struct flash_comp *pflashcomp;
3788 bool crc_match;
3789 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003790
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003791 struct flash_comp gen3_flash_types[] = {
3792 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3793 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3794 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3795 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3796 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3797 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3798 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3799 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3800 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3801 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3802 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3803 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3804 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3805 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3806 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3807 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3808 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3809 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3810 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3811 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003812 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003813
3814 struct flash_comp gen2_flash_types[] = {
3815 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3816 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3817 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3818 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3819 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3820 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3821 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3822 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3823 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3824 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3825 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3826 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3827 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3828 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3829 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3830 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003831 };
3832
Sathya Perlaca34fe32012-11-06 17:48:56 +00003833 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003834 pflashcomp = gen3_flash_types;
3835 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003836 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003837 } else {
3838 pflashcomp = gen2_flash_types;
3839 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003840 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003841 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003842
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003843 /* Get flash section info*/
3844 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3845 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303846 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003847 return -1;
3848 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003849 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003850 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003851 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003852
3853 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3854 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3855 continue;
3856
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003857 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3858 !phy_flashing_required(adapter))
3859 continue;
3860
3861 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303862 status = be_check_flash_crc(adapter, fw->data,
3863 pflashcomp[i].offset,
3864 pflashcomp[i].size,
3865 filehdr_size +
3866 img_hdrs_size,
3867 OPTYPE_REDBOOT, &crc_match);
3868 if (status) {
3869 dev_err(dev,
3870 "Could not get CRC for 0x%x region\n",
3871 pflashcomp[i].optype);
3872 continue;
3873 }
3874
3875 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003876 continue;
3877 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003878
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303879 p = fw->data + filehdr_size + pflashcomp[i].offset +
3880 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003881 if (p + pflashcomp[i].size > fw->data + fw->size)
3882 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003883
3884 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303885 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003886 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303887 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003888 pflashcomp[i].img_type);
3889 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003890 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003891 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003892 return 0;
3893}
3894
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303895static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3896{
3897 u32 img_type = le32_to_cpu(fsec_entry.type);
3898 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3899
3900 if (img_optype != 0xFFFF)
3901 return img_optype;
3902
3903 switch (img_type) {
3904 case IMAGE_FIRMWARE_iSCSI:
3905 img_optype = OPTYPE_ISCSI_ACTIVE;
3906 break;
3907 case IMAGE_BOOT_CODE:
3908 img_optype = OPTYPE_REDBOOT;
3909 break;
3910 case IMAGE_OPTION_ROM_ISCSI:
3911 img_optype = OPTYPE_BIOS;
3912 break;
3913 case IMAGE_OPTION_ROM_PXE:
3914 img_optype = OPTYPE_PXE_BIOS;
3915 break;
3916 case IMAGE_OPTION_ROM_FCoE:
3917 img_optype = OPTYPE_FCOE_BIOS;
3918 break;
3919 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3920 img_optype = OPTYPE_ISCSI_BACKUP;
3921 break;
3922 case IMAGE_NCSI:
3923 img_optype = OPTYPE_NCSI_FW;
3924 break;
3925 case IMAGE_FLASHISM_JUMPVECTOR:
3926 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3927 break;
3928 case IMAGE_FIRMWARE_PHY:
3929 img_optype = OPTYPE_SH_PHY_FW;
3930 break;
3931 case IMAGE_REDBOOT_DIR:
3932 img_optype = OPTYPE_REDBOOT_DIR;
3933 break;
3934 case IMAGE_REDBOOT_CONFIG:
3935 img_optype = OPTYPE_REDBOOT_CONFIG;
3936 break;
3937 case IMAGE_UFI_DIR:
3938 img_optype = OPTYPE_UFI_DIR;
3939 break;
3940 default:
3941 break;
3942 }
3943
3944 return img_optype;
3945}
3946
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003947static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303948 const struct firmware *fw,
3949 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003950{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003951 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303952 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003953 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303954 u32 img_offset, img_size, img_type;
3955 int status, i, filehdr_size;
3956 bool crc_match, old_fw_img;
3957 u16 img_optype;
3958 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003959
3960 filehdr_size = sizeof(struct flash_file_hdr_g3);
3961 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3962 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303963 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003964 return -1;
3965 }
3966
3967 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3968 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3969 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303970 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3971 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3972 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003973
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303974 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003975 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303976 /* Don't bother verifying CRC if an old FW image is being
3977 * flashed
3978 */
3979 if (old_fw_img)
3980 goto flash;
3981
3982 status = be_check_flash_crc(adapter, fw->data, img_offset,
3983 img_size, filehdr_size +
3984 img_hdrs_size, img_optype,
3985 &crc_match);
3986 /* The current FW image on the card does not recognize the new
3987 * FLASH op_type. The FW download is partially complete.
3988 * Reboot the server now to enable FW image to recognize the
3989 * new FLASH op_type. To complete the remaining process,
3990 * download the same FW again after the reboot.
3991 */
Kalesh AP4c600052014-05-30 19:06:26 +05303992 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
3993 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303994 dev_err(dev, "Flash incomplete. Reset the server\n");
3995 dev_err(dev, "Download FW image again after reset\n");
3996 return -EAGAIN;
3997 } else if (status) {
3998 dev_err(dev, "Could not get CRC for 0x%x region\n",
3999 img_optype);
4000 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004001 }
4002
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304003 if (crc_match)
4004 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004005
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304006flash:
4007 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004008 if (p + img_size > fw->data + fw->size)
4009 return -1;
4010
4011 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304012 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4013 * UFI_DIR region
4014 */
Kalesh AP4c600052014-05-30 19:06:26 +05304015 if (old_fw_img &&
4016 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4017 (img_optype == OPTYPE_UFI_DIR &&
4018 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304019 continue;
4020 } else if (status) {
4021 dev_err(dev, "Flashing section type 0x%x failed\n",
4022 img_type);
4023 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004024 }
4025 }
4026 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004027}
4028
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004029static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304030 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004031{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004032#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4033#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
4034 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004035 const u8 *data_ptr = NULL;
4036 u8 *dest_image_ptr = NULL;
4037 size_t image_size = 0;
4038 u32 chunk_size = 0;
4039 u32 data_written = 0;
4040 u32 offset = 0;
4041 int status = 0;
4042 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004043 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004044
4045 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
4046 dev_err(&adapter->pdev->dev,
4047 "FW Image not properly aligned. "
4048 "Length must be 4 byte aligned.\n");
4049 status = -EINVAL;
4050 goto lancer_fw_exit;
4051 }
4052
4053 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4054 + LANCER_FW_DOWNLOAD_CHUNK;
4055 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004056 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004057 if (!flash_cmd.va) {
4058 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004059 goto lancer_fw_exit;
4060 }
4061
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004062 dest_image_ptr = flash_cmd.va +
4063 sizeof(struct lancer_cmd_req_write_object);
4064 image_size = fw->size;
4065 data_ptr = fw->data;
4066
4067 while (image_size) {
4068 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4069
4070 /* Copy the image chunk content. */
4071 memcpy(dest_image_ptr, data_ptr, chunk_size);
4072
4073 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004074 chunk_size, offset,
4075 LANCER_FW_DOWNLOAD_LOCATION,
4076 &data_written, &change_status,
4077 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004078 if (status)
4079 break;
4080
4081 offset += data_written;
4082 data_ptr += data_written;
4083 image_size -= data_written;
4084 }
4085
4086 if (!status) {
4087 /* Commit the FW written */
4088 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004089 0, offset,
4090 LANCER_FW_DOWNLOAD_LOCATION,
4091 &data_written, &change_status,
4092 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004093 }
4094
4095 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
Sathya Perla748b5392014-05-09 13:29:13 +05304096 flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004097 if (status) {
4098 dev_err(&adapter->pdev->dev,
4099 "Firmware load error. "
4100 "Status code: 0x%x Additional Status: 0x%x\n",
4101 status, add_status);
4102 goto lancer_fw_exit;
4103 }
4104
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004105 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur4bebb562013-12-05 12:07:55 +05304106 dev_info(&adapter->pdev->dev,
4107 "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004108 status = lancer_physdev_ctrl(adapter,
4109 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004110 if (status) {
4111 dev_err(&adapter->pdev->dev,
4112 "Adapter busy for FW reset.\n"
4113 "New FW will not be active.\n");
4114 goto lancer_fw_exit;
4115 }
4116 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Sathya Perla748b5392014-05-09 13:29:13 +05304117 dev_err(&adapter->pdev->dev,
4118 "System reboot required for new FW to be active\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004119 }
4120
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004121 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4122lancer_fw_exit:
4123 return status;
4124}
4125
Sathya Perlaca34fe32012-11-06 17:48:56 +00004126#define UFI_TYPE2 2
4127#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004128#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004129#define UFI_TYPE4 4
4130static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004131 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004132{
4133 if (fhdr == NULL)
4134 goto be_get_ufi_exit;
4135
Sathya Perlaca34fe32012-11-06 17:48:56 +00004136 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4137 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004138 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4139 if (fhdr->asic_type_rev == 0x10)
4140 return UFI_TYPE3R;
4141 else
4142 return UFI_TYPE3;
4143 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004144 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004145
4146be_get_ufi_exit:
4147 dev_err(&adapter->pdev->dev,
4148 "UFI and Interface are not compatible for flashing\n");
4149 return -1;
4150}
4151
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004152static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4153{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004154 struct flash_file_hdr_g3 *fhdr3;
4155 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004156 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004157 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004158 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004159
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004160 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004161 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4162 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00004163 if (!flash_cmd.va) {
4164 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004165 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004166 }
4167
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004168 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004169 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004170
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004171 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004172
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004173 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4174 for (i = 0; i < num_imgs; i++) {
4175 img_hdr_ptr = (struct image_hdr *)(fw->data +
4176 (sizeof(struct flash_file_hdr_g3) +
4177 i * sizeof(struct image_hdr)));
4178 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004179 switch (ufi_type) {
4180 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004181 status = be_flash_skyhawk(adapter, fw,
Sathya Perla748b5392014-05-09 13:29:13 +05304182 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004183 break;
4184 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00004185 status = be_flash_BEx(adapter, fw, &flash_cmd,
4186 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004187 break;
4188 case UFI_TYPE3:
4189 /* Do not flash this ufi on BE3-R cards */
4190 if (adapter->asic_rev < 0x10)
4191 status = be_flash_BEx(adapter, fw,
4192 &flash_cmd,
4193 num_imgs);
4194 else {
4195 status = -1;
4196 dev_err(&adapter->pdev->dev,
4197 "Can't load BE3 UFI on BE3R\n");
4198 }
4199 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004200 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004201 }
4202
Sathya Perlaca34fe32012-11-06 17:48:56 +00004203 if (ufi_type == UFI_TYPE2)
4204 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004205 else if (ufi_type == -1)
4206 status = -1;
4207
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004208 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4209 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004210 if (status) {
4211 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004212 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004213 }
4214
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004215 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004216
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004217be_fw_exit:
4218 return status;
4219}
4220
4221int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4222{
4223 const struct firmware *fw;
4224 int status;
4225
4226 if (!netif_running(adapter->netdev)) {
4227 dev_err(&adapter->pdev->dev,
4228 "Firmware load not allowed (interface is down)\n");
4229 return -1;
4230 }
4231
4232 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4233 if (status)
4234 goto fw_exit;
4235
4236 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4237
4238 if (lancer_chip(adapter))
4239 status = lancer_fw_download(adapter, fw);
4240 else
4241 status = be_fw_download(adapter, fw);
4242
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004243 if (!status)
4244 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4245 adapter->fw_on_flash);
4246
Ajit Khaparde84517482009-09-04 03:12:16 +00004247fw_exit:
4248 release_firmware(fw);
4249 return status;
4250}
4251
Sathya Perla748b5392014-05-09 13:29:13 +05304252static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004253{
4254 struct be_adapter *adapter = netdev_priv(dev);
4255 struct nlattr *attr, *br_spec;
4256 int rem;
4257 int status = 0;
4258 u16 mode = 0;
4259
4260 if (!sriov_enabled(adapter))
4261 return -EOPNOTSUPP;
4262
4263 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4264
4265 nla_for_each_nested(attr, br_spec, rem) {
4266 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4267 continue;
4268
4269 mode = nla_get_u16(attr);
4270 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4271 return -EINVAL;
4272
4273 status = be_cmd_set_hsw_config(adapter, 0, 0,
4274 adapter->if_handle,
4275 mode == BRIDGE_MODE_VEPA ?
4276 PORT_FWD_TYPE_VEPA :
4277 PORT_FWD_TYPE_VEB);
4278 if (status)
4279 goto err;
4280
4281 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4282 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4283
4284 return status;
4285 }
4286err:
4287 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4288 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4289
4290 return status;
4291}
4292
4293static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304294 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004295{
4296 struct be_adapter *adapter = netdev_priv(dev);
4297 int status = 0;
4298 u8 hsw_mode;
4299
4300 if (!sriov_enabled(adapter))
4301 return 0;
4302
4303 /* BE and Lancer chips support VEB mode only */
4304 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4305 hsw_mode = PORT_FWD_TYPE_VEB;
4306 } else {
4307 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4308 adapter->if_handle, &hsw_mode);
4309 if (status)
4310 return 0;
4311 }
4312
4313 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4314 hsw_mode == PORT_FWD_TYPE_VEPA ?
4315 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4316}
4317
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304318#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304319static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4320 __be16 port)
4321{
4322 struct be_adapter *adapter = netdev_priv(netdev);
4323 struct device *dev = &adapter->pdev->dev;
4324 int status;
4325
4326 if (lancer_chip(adapter) || BEx_chip(adapter))
4327 return;
4328
4329 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4330 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4331 be16_to_cpu(port));
4332 dev_info(dev,
4333 "Only one UDP port supported for VxLAN offloads\n");
4334 return;
4335 }
4336
4337 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4338 OP_CONVERT_NORMAL_TO_TUNNEL);
4339 if (status) {
4340 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4341 goto err;
4342 }
4343
4344 status = be_cmd_set_vxlan_port(adapter, port);
4345 if (status) {
4346 dev_warn(dev, "Failed to add VxLAN port\n");
4347 goto err;
4348 }
4349 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4350 adapter->vxlan_port = port;
4351
4352 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4353 be16_to_cpu(port));
4354 return;
4355err:
4356 be_disable_vxlan_offloads(adapter);
4357 return;
4358}
4359
4360static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4361 __be16 port)
4362{
4363 struct be_adapter *adapter = netdev_priv(netdev);
4364
4365 if (lancer_chip(adapter) || BEx_chip(adapter))
4366 return;
4367
4368 if (adapter->vxlan_port != port)
4369 return;
4370
4371 be_disable_vxlan_offloads(adapter);
4372
4373 dev_info(&adapter->pdev->dev,
4374 "Disabled VxLAN offloads for UDP port %d\n",
4375 be16_to_cpu(port));
4376}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304377#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304378
stephen hemmingere5686ad2012-01-05 19:10:25 +00004379static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004380 .ndo_open = be_open,
4381 .ndo_stop = be_close,
4382 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004383 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004384 .ndo_set_mac_address = be_mac_addr_set,
4385 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004386 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004387 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004388 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4389 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004390 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004391 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004392 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004393 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304394 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004395#ifdef CONFIG_NET_POLL_CONTROLLER
4396 .ndo_poll_controller = be_netpoll,
4397#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004398 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4399 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304400#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304401 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304402#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304403#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304404 .ndo_add_vxlan_port = be_add_vxlan_port,
4405 .ndo_del_vxlan_port = be_del_vxlan_port,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304406#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004407};
4408
4409static void be_netdev_init(struct net_device *netdev)
4410{
4411 struct be_adapter *adapter = netdev_priv(netdev);
4412
Sathya Perlac9c47142014-03-27 10:46:19 +05304413 if (skyhawk_chip(adapter)) {
4414 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4415 NETIF_F_TSO | NETIF_F_TSO6 |
4416 NETIF_F_GSO_UDP_TUNNEL;
4417 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4418 }
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004419 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004420 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004421 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004422 if (be_multi_rxq(adapter))
4423 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004424
4425 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004426 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004427
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004428 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004429 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004430
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004431 netdev->priv_flags |= IFF_UNICAST_FLT;
4432
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004433 netdev->flags |= IFF_MULTICAST;
4434
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004435 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004436
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004437 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004438
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004439 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004440}
4441
4442static void be_unmap_pci_bars(struct be_adapter *adapter)
4443{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004444 if (adapter->csr)
4445 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004446 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004447 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004448}
4449
/* BAR number holding the doorbell registers: BAR 0 on Lancer and on VFs,
 * BAR 4 on PFs of the other chips.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4457
4458static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004459{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004460 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004461 adapter->roce_db.size = 4096;
4462 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4463 db_bar(adapter));
4464 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4465 db_bar(adapter));
4466 }
Parav Pandit045508a2012-03-26 14:27:13 +00004467 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004468}
4469
4470static int be_map_pci_bars(struct be_adapter *adapter)
4471{
4472 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004473
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004474 if (BEx_chip(adapter) && be_physfn(adapter)) {
4475 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4476 if (adapter->csr == NULL)
4477 return -ENOMEM;
4478 }
4479
Sathya Perlace66f782012-11-06 17:48:58 +00004480 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004481 if (addr == NULL)
4482 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004483 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004484
4485 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004486 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004487
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004488pci_map_err:
4489 be_unmap_pci_bars(adapter);
4490 return -ENOMEM;
4491}
4492
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004493static void be_ctrl_cleanup(struct be_adapter *adapter)
4494{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004495 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004496
4497 be_unmap_pci_bars(adapter);
4498
4499 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004500 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4501 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004502
Sathya Perla5b8821b2011-08-02 19:57:44 +00004503 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004504 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004505 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4506 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004507}
4508
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004509static int be_ctrl_init(struct be_adapter *adapter)
4510{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004511 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4512 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004513 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004514 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004515 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004516
Sathya Perlace66f782012-11-06 17:48:58 +00004517 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4518 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4519 SLI_INTF_FAMILY_SHIFT;
4520 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4521
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004522 status = be_map_pci_bars(adapter);
4523 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004524 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004525
4526 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004527 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4528 mbox_mem_alloc->size,
4529 &mbox_mem_alloc->dma,
4530 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004531 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004532 status = -ENOMEM;
4533 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004534 }
4535 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4536 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4537 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4538 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004539
Sathya Perla5b8821b2011-08-02 19:57:44 +00004540 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa2013-08-26 22:45:23 -07004541 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4542 rx_filter->size, &rx_filter->dma,
4543 GFP_KERNEL);
Sathya Perla5b8821b2011-08-02 19:57:44 +00004544 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004545 status = -ENOMEM;
4546 goto free_mbox;
4547 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004548
Ivan Vecera29849612010-12-14 05:43:19 +00004549 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004550 spin_lock_init(&adapter->mcc_lock);
4551 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004552
Suresh Reddy5eeff632014-01-06 13:02:24 +05304553 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004554 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004555 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004556
4557free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004558 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4559 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004560
4561unmap_pci_bars:
4562 be_unmap_pci_bars(adapter);
4563
4564done:
4565 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004566}
4567
4568static void be_stats_cleanup(struct be_adapter *adapter)
4569{
Sathya Perla3abcded2010-10-03 22:12:27 -07004570 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004571
4572 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004573 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4574 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004575}
4576
4577static int be_stats_init(struct be_adapter *adapter)
4578{
Sathya Perla3abcded2010-10-03 22:12:27 -07004579 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004580
Sathya Perlaca34fe32012-11-06 17:48:56 +00004581 if (lancer_chip(adapter))
4582 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4583 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004584 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004585 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004586 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004587 else
4588 /* ALL non-BE ASICs */
4589 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004590
Joe Perchesede23fa2013-08-26 22:45:23 -07004591 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4592 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004593 if (cmd->va == NULL)
4594 return -1;
4595 return 0;
4596}
4597
Bill Pemberton3bc6b062012-12-03 09:23:09 -05004598static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004599{
4600 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004601
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004602 if (!adapter)
4603 return;
4604
Parav Pandit045508a2012-03-26 14:27:13 +00004605 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004606 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00004607
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004608 cancel_delayed_work_sync(&adapter->func_recovery_work);
4609
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004610 unregister_netdev(adapter->netdev);
4611
Sathya Perla5fb379e2009-06-18 00:02:59 +00004612 be_clear(adapter);
4613
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004614 /* tell fw we're done with firing cmds */
4615 be_cmd_fw_clean(adapter);
4616
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004617 be_stats_cleanup(adapter);
4618
4619 be_ctrl_cleanup(adapter);
4620
Sathya Perlad6b6d982012-09-05 01:56:48 +00004621 pci_disable_pcie_error_reporting(pdev);
4622
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004623 pci_release_regions(pdev);
4624 pci_disable_device(pdev);
4625
4626 free_netdev(adapter->netdev);
4627}
4628
Sathya Perla39f1d942012-05-08 19:41:24 +00004629static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004630{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304631 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004632
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004633 status = be_cmd_get_cntl_attributes(adapter);
4634 if (status)
4635 return status;
4636
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004637 /* Must be a power of 2 or else MODULO will BUG_ON */
4638 adapter->be_get_temp_freq = 64;
4639
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304640 if (BEx_chip(adapter)) {
4641 level = be_cmd_get_fw_log_level(adapter);
4642 adapter->msg_enable =
4643 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4644 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004645
Sathya Perla92bf14a2013-08-27 16:57:32 +05304646 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004647 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004648}
4649
/* Bring a Lancer chip back from an error state: wait for FW readiness,
 * tear down the current configuration and rebuild it from scratch.
 * Returns 0 on success or a negative error code; -EAGAIN means FW
 * resources are still being provisioned and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* FW must report ready before we re-initialize anything */
	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	/* Destroy queues/IRQs so be_setup() can recreate them cleanly */
	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	/* -EAGAIN is transient (provisioning); anything else is fatal here */
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4686
/* Periodic (1 second) work item that polls for adapter errors and, on
 * Lancer chips, attempts automatic recovery. Reschedules itself unless
 * recovery failed with an error other than -EAGAIN.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device
		 * while it is being recovered
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4713
4714static void be_worker(struct work_struct *work)
4715{
4716 struct be_adapter *adapter =
4717 container_of(work, struct be_adapter, work.work);
4718 struct be_rx_obj *rxo;
4719 int i;
4720
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004721 /* when interrupts are not yet enabled, just reap any pending
4722 * mcc completions */
4723 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004724 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004725 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004726 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004727 goto reschedule;
4728 }
4729
4730 if (!adapter->stats_cmd_sent) {
4731 if (lancer_chip(adapter))
4732 lancer_cmd_get_pport_stats(adapter,
4733 &adapter->stats_cmd);
4734 else
4735 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4736 }
4737
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05304738 if (be_physfn(adapter) &&
4739 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004740 be_cmd_get_die_temperature(adapter);
4741
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004742 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05304743 /* Replenish RX-queues starved due to memory
4744 * allocation failures.
4745 */
4746 if (rxo->rx_post_starved)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004747 be_post_rx_frags(rxo, GFP_KERNEL);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004748 }
4749
Sathya Perla2632baf2013-10-01 16:00:00 +05304750 be_eqd_update(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004751
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004752reschedule:
4753 adapter->work_counter++;
4754 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4755}
4756
Sathya Perla257a3fe2013-06-14 15:54:51 +05304757/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004758static bool be_reset_required(struct be_adapter *adapter)
4759{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304760 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004761}
4762
Sathya Perlad3791422012-09-28 04:39:44 +00004763static char *mc_name(struct be_adapter *adapter)
4764{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304765 char *str = ""; /* default */
4766
4767 switch (adapter->mc_type) {
4768 case UMC:
4769 str = "UMC";
4770 break;
4771 case FLEX10:
4772 str = "FLEX10";
4773 break;
4774 case vNIC1:
4775 str = "vNIC-1";
4776 break;
4777 case nPAR:
4778 str = "nPAR";
4779 break;
4780 case UFP:
4781 str = "UFP";
4782 break;
4783 case vNIC2:
4784 str = "vNIC-2";
4785 break;
4786 default:
4787 str = "";
4788 }
4789
4790 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004791}
4792
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4797
/* PCI probe: bring up one BE/Lancer NIC function.
 * Enables the PCI device, claims its regions, allocates the net_device,
 * syncs with FW, optionally FLRs the function, creates HW resources via
 * be_setup() and registers the netdev. Failure paths unwind through the
 * goto ladder in reverse order of acquisition.
 * Returns 0 on success or a negative error code.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled only on the PF; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already active on it */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4919
/* PM suspend hook: arm wake-on-LAN if enabled, quiesce the interface,
 * tear down HW resources and put the device into the requested low-power
 * state. Counterpart of be_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* stop the error-recovery poller before tearing things down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4944
4945static int be_resume(struct pci_dev *pdev)
4946{
4947 int status = 0;
4948 struct be_adapter *adapter = pci_get_drvdata(pdev);
4949 struct net_device *netdev = adapter->netdev;
4950
4951 netif_device_detach(netdev);
4952
4953 status = pci_enable_device(pdev);
4954 if (status)
4955 return status;
4956
Yijing Wang1ca01512013-06-27 20:53:42 +08004957 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004958 pci_restore_state(pdev);
4959
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304960 status = be_fw_wait_ready(adapter);
4961 if (status)
4962 return status;
4963
Ajit Khaparded4360d62013-11-22 12:51:09 -06004964 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00004965 /* tell fw we're ready to fire cmds */
4966 status = be_cmd_fw_init(adapter);
4967 if (status)
4968 return status;
4969
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004970 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004971 if (netif_running(netdev)) {
4972 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004973 be_open(netdev);
4974 rtnl_unlock();
4975 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004976
4977 schedule_delayed_work(&adapter->func_recovery_work,
4978 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004979 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004980
Suresh Reddy76a9e082014-01-15 13:23:40 +05304981 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004982 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004983
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004984 return 0;
4985}
4986
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* stop periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* FLR halts all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5006
/* EEH callback: a PCI channel error was detected. Quiesce the interface
 * and tear down HW resources (first occurrence only), then ask the EEH
 * core for a slot reset — or disconnect on permanent failure.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* tear down only once, even if this callback fires repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5045
/* EEH callback: the slot has been reset. Re-enable the device, restore
 * its PCI state and wait for FW readiness before reporting recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear stale uncorrectable-AER status left by the error */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5072
/* EEH callback: recovery is complete. Reset and re-initialize the
 * function, rebuild HW resources and bring the interface back up.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the periodic error-recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5115
/* PCI EEH (Extended Error Handling) recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5121
/* PCI driver glue: probe/remove, legacy PM hooks, shutdown and EEH
 * error handlers for all devices listed in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5132
5133static int __init be_init_module(void)
5134{
Joe Perches8e95a202009-12-03 07:58:21 +00005135 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5136 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005137 printk(KERN_WARNING DRV_NAME
5138 " : Module param rx_frag_size must be 2048/4096/8192."
5139 " Using 2048\n");
5140 rx_frag_size = 2048;
5141 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005142
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005143 return pci_register_driver(&be_driver);
5144}
5145module_init(be_init_module);
5146
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);