blob: 6297e72b77e2e1d294973057adf0f2139cf8140d [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
/* PCI device IDs claimed by this driver: BE2/BE3 ASICs (ServerEngines
 * vendor ID) and the OneConnect/Skyhawk family (Emulex vendor ID).
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the Unrecoverable Error (UE)
 * status-low CSR, indexed by bit position; used when logging HW errors.
 * NOTE(review): several entries carry trailing spaces — they are emitted
 * verbatim in log messages, so they are preserved byte-for-byte here.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position names for the UE status-high CSR; companion table to
 * ue_status_low_desc above, also used only for error logging.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * processed events and optionally re-arm the EQ (@arm) and/or clear the
 * interrupt (@clear_int). Skipped entirely while an EEH error is pending.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* doorbell refers to an event queue, not a completion queue */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Ring the completion-queue doorbell for CQ @qid: acknowledge @num_popped
 * consumed completion entries and optionally re-arm the CQ (@arm).
 * Skipped entirely while an EEH error is pending. Non-static: also used
 * by other parts of the driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* ndo_set_mac_address handler: program a new MAC via FW, delete the old
 * PMAC entry, then verify with the FW which MAC is actually active before
 * committing it to netdev->dev_addr. The verify step exists because on a
 * VF the PMAC_ADD command may legitimately fail (see comment below).
 * Returns 0 on success or a negative errno (-EADDRNOTAVAIL, -EPERM, or a
 * FW-command status).
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0-layout (BE2) FW statistics into the driver's generic
 * drv_stats structure. The FW buffer is byte-swapped in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan filtering; the driver reports their sum */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 layout keeps jabber counters per port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1-layout (BE3) FW statistics into the driver's generic
 * drv_stats structure. The FW buffer is byte-swapped in place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2-layout (post-BE3 chips) FW statistics into the driver's
 * generic drv_stats structure; also picks up RoCE counters when the
 * adapter supports RoCE. The FW buffer is byte-swapped in place first.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters only exist in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer per-physical-port (pport) FW statistics into the
 * driver's generic drv_stats structure. The FW buffer is byte-swapped in
 * place first. Lancer uses a completely different stats layout from the
 * BEx v0/v1/v2 commands.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* FW returns stats little-endian; convert in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like BEx v0, Lancer splits address/vlan filtering; report the sum */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
/* Fold a 16-bit HW counter sample @val into a 32-bit accumulator @acc.
 * The low 16 bits of *acc mirror the last sample; when the new sample is
 * smaller than the previous one the HW counter has wrapped, so an extra
 * 65536 is added. The final store uses ACCESS_ONCE so concurrent readers
 * see a single consistent write.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	/* sample went "backwards" => the 16-bit HW counter wrapped */
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
538
/* Record the per-RX-queue "drops due to no fragments" counter. On BEx
 * chips the HW counter is only 16 bits wide, so it is accumulated into a
 * 32-bit driver counter; newer chips report a full-width value that can
 * be stored directly.
 */
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
551
/* Parse the FW stats response into drv_stats, dispatching on chip family:
 * Lancer uses its own pport layout; BE2/BE3/newer use the v0/v1/v2 GET_STATS
 * layouts. For non-Lancer chips the per-RX-queue erx drop counters are also
 * harvested.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
577
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet and byte
 * counters (read consistently via the u64_stats seqcount retry loops) and
 * derive the standard rtnl error counters from the driver's FW-sourced
 * drv_stats. Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop gives a consistent 64-bit snapshot on 32-bit */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot pattern as the RX loop above */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Program the header WRB that leads every TX request: LSO/checksum
 * offload flags, VLAN tag, total WRB count and payload length.
 * @wrb_cnt/@len: values computed by the caller for this request.
 * @skip_hw_vlan: when true, HW VLAN insertion is suppressed by setting
 * event=1/complete=0 (FW-defined encoding, see comment below).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO request: pass the MSS to HW */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* csum offload: pick inner vs outer L4 proto for tunnels */
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* Populate the TX queue with WRBs for @skb: one header WRB, one WRB for
 * the linear portion (if any), one per page fragment, and an optional
 * trailing dummy WRB (@dummy_wrb, for chips needing an even WRB count).
 * Returns the number of data bytes mapped, or 0 on a DMA mapping error,
 * in which case every mapping made so far is undone and the queue head
 * is restored to its position at entry of the data WRBs.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB now; it is filled last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* where data WRBs start; unwind point */

	if (skb->len > skb->data_len) {
		/* map the linear (header) part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB to even out the count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind the head, then walk the data WRBs unmapping each;
	 * only the first can be a single mapping (the linear part)
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert VLAN tag(s) into the packet payload in software.
 * Used when HW VLAN insertion must be avoided (HW workarounds) or when
 * an outer QnQ tag must be added. May set *skip_hw_vlan to tell FW to
 * bypass HW tagging for this packet.
 * Returns the (possibly reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* in QnQ/pvid config, fall back to the pvid if no tag was given */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the payload; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply BEx/Lancer TX workarounds that may modify, reallocate or drop
 * the skb. Returns the resulting skb, or NULL if the pkt was dropped or
 * an skb operation failed (already freed in the drop case).
 * *skip_hw_vlan is set when HW VLAN tagging must be bypassed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the pkt back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply HW workarounds, build the WRBs, ring
 * the doorbell and update stats. Always returns NETDEV_TX_OK; packets
 * that cannot be sent are dropped and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* pre-xmit head: sent_skb slot / unwind point */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * Programs the HW VLAN filter from adapter->vids, falling back to VLAN
 * promiscuous mode when the filter cannot hold all configured vids (or
 * FW reports insufficient resources), and re-enabling the filter when
 * it can. Returns 0 or a FW cmd status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* filter programmed OK; leave VLAN promisc if it was on */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* nothing to do if already in VLAN promisc mode */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
Patrick McHardy80d5c362013-04-19 02:04:28 +00001148static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001151 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001153 /* Packets with VID 0 are always received by Lancer by default */
1154 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301155 return status;
1156
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301157 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301158 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001159
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301160 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301161 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001162
Somnath Kotura6b74e02014-01-21 15:50:55 +05301163 status = be_vid_config(adapter);
1164 if (status) {
1165 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301166 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301167 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301168
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001169 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170}
1171
Patrick McHardy80d5c362013-04-19 02:04:28 +00001172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
1175
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301178 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001179
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301180 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301181 adapter->vlans_added--;
1182
1183 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184}
1185
Somnath kotur7ad09452014-03-03 14:24:43 +05301186static void be_clear_promisc(struct be_adapter *adapter)
1187{
1188 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301189 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301190
1191 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1192}
1193
/* ndo_set_rx_mode handler: sync the HW RX filter with the netdev's
 * flags and UC/MC address lists. Falls back to promiscuous mode when
 * the UC list exceeds HW capacity, and to multicast-promiscuous mode
 * when the MC list does (or programming it fails).
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* UC list changed: drop all secondary PMAC entries, then re-add */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC addrs for HW: full promisc is the only option */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed; MCAST promisc no longer needed */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1260
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On BEx chips the MAC lives in a pmac table entry, so the old entry is
 * deleted and a new one added; on other chips a single SET_MAC command
 * is issued.  On success the new MAC is cached in the VF's config.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		/* Replace the VF's pmac entry: delete the old one first,
		 * then add the new MAC (vf + 1 is the FW domain of the VF).
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		/* Cache the MAC only after the FW accepted it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1292
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001293static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301294 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001295{
1296 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001297 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001298
Sathya Perla11ac75e2011-12-13 00:58:50 +00001299 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001300 return -EPERM;
1301
Sathya Perla11ac75e2011-12-13 00:58:50 +00001302 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001303 return -EINVAL;
1304
1305 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001306 vi->max_tx_rate = vf_cfg->tx_rate;
1307 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001308 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1309 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001310 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301311 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001312
1313 return 0;
1314}
1315
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF @vf.
 * A non-zero vlan/qos pair installs the tag via the hardware-switch config;
 * a zero pair resets transparent tagging.  The cached vlan_tag is updated
 * only when the FW command succeeds.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	/* VID must fit in 12 bits, priority in 3 bits */
	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* Skip the FW call if the requested tag is already set */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1346
/* ndo_set_vf_rate handler: apply a TX rate limit (in Mbps) to VF @vf.
 * min_tx_rate is not supported and must be 0.  max_tx_rate == 0 clears
 * the limit (QOS is configured without link-speed validation); otherwise
 * the rate is validated against the current link speed before being
 * programmed.  Returns 0 on success or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* A minimum TX rate cannot be enforced by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 means "no limit": skip the link-speed checks */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	/* The limit is expressed relative to link speed, so the link
	 * must be up for the value to be validated/applied.
	 */
	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -EPERM;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the limit only after the FW accepted it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return status;
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301408static int be_set_vf_link_state(struct net_device *netdev, int vf,
1409 int link_state)
1410{
1411 struct be_adapter *adapter = netdev_priv(netdev);
1412 int status;
1413
1414 if (!sriov_enabled(adapter))
1415 return -EPERM;
1416
1417 if (vf >= adapter->num_vfs)
1418 return -EINVAL;
1419
1420 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1421 if (!status)
1422 adapter->vf_cfg[vf].plink_tracking = link_state;
1423
1424 return status;
1425}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001426
Sathya Perla2632baf2013-10-01 16:00:00 +05301427static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1428 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001429{
Sathya Perla2632baf2013-10-01 16:00:00 +05301430 aic->rx_pkts_prev = rx_pkts;
1431 aic->tx_reqs_prev = tx_pkts;
1432 aic->jiffies = now;
1433}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001434
/* Adaptive interrupt coalescing: for every event queue, derive a new EQ
 * delay (eqd) from the observed RX+TX packet rate since the last sample
 * and push all changed delays to the FW in one MODIFY_EQD command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: fall back to the static
			 * (ethtool-configured) delay and clear the baseline.
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the per-queue counters under the u64_stats retry
		 * loop so 64-bit reads are consistent on 32-bit hosts.
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Packets per second over the sample interval, then map
		 * pps to a delay and clamp to the configured min/max.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only if the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1501
/* Account one RX completion in the per-queue stats (bytes, packets,
 * multicast and error counters), inside a u64_stats update section so
 * 64-bit counters read consistently on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1517
Sathya Perla2e588f82011-03-11 02:49:26 +00001518static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001519{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001520 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301521 * Also ignore ipcksm for ipv6 pkts
1522 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001523 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301524 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001525}
1526
/* Pop the RX-queue tail's page-info entry and make its data CPU-visible:
 * the last fragment of a page triggers a full DMA unmap of the (big)
 * page; earlier fragments only need a dma_sync of their rx_frag_size
 * window.  Advances the queue tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1552
1553/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001554static void be_rx_compl_discard(struct be_rx_obj *rxo,
1555 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001558 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001559
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001560 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301561 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001562 put_page(page_info->page);
1563 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001564 }
1565}
1566
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * Tiny packets (<= BE_HDR_LEN) are copied fully into the linear area;
 * larger packets get the Ethernet header copied linearly and the rest
 * attached as page fragments, with consecutive frags from the same
 * physical page coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the payload stays in the page as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * reference and just grow the existing frag.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1641
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX frags, set checksum/hash/vlan
 * metadata and hand it to the stack via netif_receive_skb().  If skb
 * allocation fails the completion's frags are discarded and a no-skb
 * drop is counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum verdict only when RXCSUM is enabled and
	 * the completion flags say the checksum is reliable.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1677
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX page frags directly to the napi frags-skb (coalescing
 * frags that share a physical page), set metadata and pass it to
 * napi_gro_frags().  Falls back to discarding the frags if no frags-skb
 * is available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at (u16)-1; the i == 0 branch below increments it to 0
	 * before the first frag slot is ever used.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1735
/* Decode a v1-format RX completion descriptor into the driver's
 * chip-independent rxcp representation using AMAP bit extraction.
 * vlan fields (qnq, vlan_tag) are read only when the vtp bit is set.
 * Unlike v0, v1 carries a 'tunneled' flag.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767
/* Decode a v0-format RX completion descriptor into the driver's
 * chip-independent rxcp representation using AMAP bit extraction.
 * vlan fields (qnq, vlan_tag) are read only when the vtp bit is set.
 * Unlike v1, v0 carries an 'ip_frag' flag instead of 'tunneled'.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1799
/* Fetch the next valid RX completion from the CQ tail, or NULL if none.
 * Converts the raw descriptor (v0 or v1 depending on be3_native) into
 * rxo->rxcp, applies vlan/qnq/pvid fixups, clears the descriptor's
 * valid bit and advances the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum verdict is not usable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless that vid was
		 * explicitly configured on the host.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1844
Eric Dumazet1829b082011-03-01 05:48:12 +00001845static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001848
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001850 gfp |= __GFP_COMP;
1851 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852}
1853
/*
 * Allocate a big page, split it into fragments of size rx_frag_size and
 * post them as receive buffers to BE, up to MAX_RX_POST per call.
 * @gfp allows both atomic (NAPI) and sleeping (worker/setup) contexts.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Stop as soon as we reach a slot that still holds a page (RXQ full) */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and DMA-map it once;
			 * fragments below just add offsets into it.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another fragment of the current page: take an extra
			 * page reference so each fragment can be released
			 * independently at completion time.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Write the fragment's bus address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Page exhausted: record the whole-page bus address
			 * on its last fragment for the eventual dma_unmap.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell the HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1932
/* Fetch the next valid TX completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped in place, its valid bit cleared for slot
 * reuse, and the CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear valid so this slot reads as empty the next time around */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1948
/* Reclaim the wrbs of one transmitted skb from the TXQ, walking the tail
 * up to @last_index: unmap each fragment's DMA mapping, free the skb and
 * return the number of wrbs consumed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is unmapped only on the first
		 * data wrb; subsequent wrbs carry page fragments.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* _any variant: safe from hard-irq/softirq context */
	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1980
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001981/* Return the number of events in the event queue */
1982static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001983{
1984 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001985 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001986
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001987 do {
1988 eqe = queue_tail_node(&eqo->q);
1989 if (eqe->evt == 0)
1990 break;
1991
1992 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001993 eqe->evt = 0;
1994 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001995 queue_tail_inc(&eqo->q);
1996 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001997
1998 return num;
1999}
2000
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002001/* Leaves the EQ is disarmed state */
2002static void be_eq_clean(struct be_eq_obj *eqo)
2003{
2004 int num = events_get(eqo);
2005
2006 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2007}
2008
/* Drain an RX CQ at teardown: discard all pending completions, wait for
 * the HW flush completion, then release every still-posted RX buffer.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2057
/* Drain all TX completion queues at teardown: poll the CQs until the HW
 * has been silent for ~10ms (or a HW error is detected), then forcibly
 * reclaim any posted wrbs whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW still active: restart the silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb from its shape,
			 * since no completion told us where it ends.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2117
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002118static void be_evt_queues_destroy(struct be_adapter *adapter)
2119{
2120 struct be_eq_obj *eqo;
2121 int i;
2122
2123 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002124 if (eqo->q.created) {
2125 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002126 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302127 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302128 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002129 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002130 be_queue_free(adapter, &eqo->q);
2131 }
2132}
2133
2134static int be_evt_queues_create(struct be_adapter *adapter)
2135{
2136 struct be_queue_info *eq;
2137 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302138 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002139 int i, rc;
2140
Sathya Perla92bf14a2013-08-27 16:57:32 +05302141 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2142 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002143
2144 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302145 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2146 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302147 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302148 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002149 eqo->adapter = adapter;
2150 eqo->tx_budget = BE_TX_BUDGET;
2151 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302152 aic->max_eqd = BE_MAX_EQD;
2153 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002154
2155 eq = &eqo->q;
2156 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302157 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002158 if (rc)
2159 return rc;
2160
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302161 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002162 if (rc)
2163 return rc;
2164 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002165 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002166}
2167
Sathya Perla5fb379e2009-06-18 00:02:59 +00002168static void be_mcc_queues_destroy(struct be_adapter *adapter)
2169{
2170 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002171
Sathya Perla8788fdc2009-07-27 22:52:03 +00002172 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002173 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002174 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002175 be_queue_free(adapter, q);
2176
Sathya Perla8788fdc2009-07-27 22:52:03 +00002177 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002178 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002179 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002180 be_queue_free(adapter, q);
2181}
2182
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and WRB queue; on failure the goto
 * chain unwinds only what was successfully set up. Returns 0 or -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2215
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216static void be_tx_queues_destroy(struct be_adapter *adapter)
2217{
2218 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002219 struct be_tx_obj *txo;
2220 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221
Sathya Perla3c8def92011-06-12 20:01:58 +00002222 for_all_tx_queues(adapter, txo, i) {
2223 q = &txo->q;
2224 if (q->created)
2225 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2226 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227
Sathya Perla3c8def92011-06-12 20:01:58 +00002228 q = &txo->cq;
2229 if (q->created)
2230 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2231 be_queue_free(adapter, q);
2232 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233}
2234
/* Allocate and create all TX queues and their completion queues.
 * CQs are distributed over the EQs round-robin. Returns 0 or a negative
 * error; the caller is expected to clean up on failure.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2275
2276static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277{
2278 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002279 struct be_rx_obj *rxo;
2280 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281
Sathya Perla3abcded2010-10-03 22:12:27 -07002282 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002283 q = &rxo->cq;
2284 if (q->created)
2285 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2286 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288}
2289
/* Size the RX queue set and create one completion queue per RXQ.
 * Returns 0 or a negative error; caller cleans up on failure.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs are spread over the EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2326
/* INTx interrupt handler, used when MSI-X is not enabled. Counts pending
 * events, schedules NAPI, and distinguishes genuine from spurious
 * interrupts on the shared line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2358
/* MSI-X interrupt handler: one vector per EQ; defers all work to NAPI */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	/* Keep the EQ disarmed while NAPI runs; be_poll() counts, acks
	 * and re-arms the EQ when polling completes.
	 */
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2367
Sathya Perla2e588f82011-03-11 02:49:26 +00002368static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002369{
Somnath Koture38b1702013-05-29 22:55:56 +00002370 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002371}
2372
/* Consume up to @budget RX completions from @rxo's CQ and hand packets
 * to the stack via GRO or the regular receive path. @polling tells NAPI
 * polling apart from busy-polling (GRO is skipped while busy-polling).
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the consumed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2428
/* Reap up to @budget TX completions for @txo, reclaim the associated
 * wrbs/skbs, and wake netdev sub-queue @idx if it was flow-stopped.
 * Returns true when fewer than @budget completions were found (done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002462
/* NAPI poll handler for one EQ: services all TXQs and RXQs mapped to this
 * EQ, processes MCC completions on the MCC EQ, and re-arms the EQ only
 * when the budget was not exhausted.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the RXQs; report budget exhausted so
		 * NAPI polls again instead of completing.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Ack the events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2507
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) receive path for one event queue.
 * Polls each RX queue on this EQ for up to 4 completions and stops at the
 * first queue that yields any work.  Returns the number of packets
 * processed, or LL_FLUSH_BUSY when the EQ is currently owned by the
 * regular NAPI path.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int qidx, rx_work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, qidx) {
		rx_work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (rx_work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return rx_work;
}
#endif
2529
/* Poll the adapter for fatal hardware/firmware errors.
 * Lancer chips report errors via the SLIPORT status registers; other chips
 * report Unrecoverable Errors (UE) via PCI config space.  On any real error
 * the carrier is turned off; adapter->hw_error is set where appropriate so
 * the rest of the driver stops touching the hardware.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Already in error state; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Mask off UE bits the FW told us to ignore */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a line for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2605
Sathya Perla8d56ff12009-11-22 22:02:26 +00002606static void be_msix_disable(struct be_adapter *adapter)
2607{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002608 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002609 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002610 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302611 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002612 }
2613}
2614
/* Enable MSI-X and split the granted vectors between the NIC and (when
 * supported) RoCE.  Returns 0 on success.  On failure a PF falls back to
 * INTx (returns 0); a VF cannot use INTx, so the error is propagated.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested (down to MIN_MSIX_VECTORS) */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2658
/* Return the MSI-X vector (IRQ number) assigned to the given event queue */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2664
/* Request one MSI-X IRQ per event queue.  On failure, frees every IRQ
 * already requested (in reverse order) and disables MSI-X entirely.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-queue IRQ name, e.g. "eth0-q0" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs requested so far, last first */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2688
/* Register interrupt handlers: MSI-X when enabled, otherwise fall back to
 * a shared INTx line (PFs only; VFs must use MSI-X).
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2716
/* Free whichever IRQs be_irq_register() acquired (INTx or per-EQ MSI-X).
 * No-op if nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2739
/* Destroy all RX queues: issue the FW destroy command and drain the
 * completion queue for each created RXQ, then free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* Flush completions for the queue just destroyed */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2755
/* ndo_stop handler: quiesce NAPI, drain TX, destroy RX queues, delete
 * unicast MACs, clean the EQs and release IRQs.  The ordering of these
 * steps matters; do not reorder.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the extra unicast MACs; index 0 (primary MAC) is kept */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no handler is still running for this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2805
/* Allocate and create all RX queues, program the RSS indirection table and
 * hash key (when more than the default RXQ exists), and post the initial
 * set of receive buffers.  Returns 0 or a negative error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queues round-robin until every slot is assigned.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
			j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Cache the key that was programmed, for ethtool reporting */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2871
/* ndo_open handler: create RX queues, register IRQs, arm the CQs/EQs,
 * enable NAPI and start the TX queues.  On any failure, be_close() undoes
 * the partial bring-up and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn the currently-open VxLAN ports from the net core */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2921
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002922static int be_setup_wol(struct be_adapter *adapter, bool enable)
2923{
2924 struct be_dma_mem cmd;
2925 int status = 0;
2926 u8 mac[ETH_ALEN];
2927
2928 memset(mac, 0, ETH_ALEN);
2929
2930 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002931 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2932 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002933 if (cmd.va == NULL)
2934 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002935
2936 if (enable) {
2937 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302938 PCICFG_PM_CONTROL_OFFSET,
2939 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002940 if (status) {
2941 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002942 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002943 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2944 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002945 return status;
2946 }
2947 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302948 adapter->netdev->dev_addr,
2949 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002950 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2951 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2952 } else {
2953 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2954 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2955 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2956 }
2957
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002958 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002959 return status;
2960}
2961
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002962/*
2963 * Generate a seed MAC address from the PF MAC Address using jhash.
2964 * MAC Address for VFs are assigned incrementally starting from the seed.
2965 * These addresses are programmed in the ASIC by the PF and the VF driver
2966 * queries for the MAC address during its probe.
2967 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	/* Derive the seed MAC from the PF MAC (see comment above) */
	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips add a pmac filter; newer chips set the MAC
		 * directly on the VF's interface.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next sequential address */
		mac[5] += 1;
	}
	return status;
}
2997
Sathya Perla4c876612013-02-03 20:30:11 +00002998static int be_vfs_mac_query(struct be_adapter *adapter)
2999{
3000 int status, vf;
3001 u8 mac[ETH_ALEN];
3002 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003003
3004 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303005 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3006 mac, vf_cfg->if_handle,
3007 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003008 if (status)
3009 return status;
3010 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3011 }
3012 return 0;
3013}
3014
/* Tear down SR-IOV: disable SR-IOV (unless VFs are still assigned to VMs),
 * remove each VF's MAC and interface, and free the per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* Cannot disable SR-IOV while a VM owns a VF; skip to cleanup */
	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal method mirrors be_vf_eth_addr_config() */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
3042
/* Destroy all queue sets in a fixed order: MCC, RX CQs, TX queues and
 * finally the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3050
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303051static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003052{
Sathya Perla191eb752012-02-23 18:50:13 +00003053 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3054 cancel_delayed_work_sync(&adapter->work);
3055 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3056 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303057}
3058
Somnath Koturb05004a2013-12-05 12:08:16 +05303059static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303060{
3061 int i;
3062
Somnath Koturb05004a2013-12-05 12:08:16 +05303063 if (adapter->pmac_id) {
3064 for (i = 0; i < (adapter->uc_macs + 1); i++)
3065 be_cmd_pmac_del(adapter, adapter->if_handle,
3066 adapter->pmac_id[i], 0);
3067 adapter->uc_macs = 0;
3068
3069 kfree(adapter->pmac_id);
3070 adapter->pmac_id = NULL;
3071 }
3072}
3073
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload configuration: switch the interface back to normal
 * (non-tunnel) mode, clear the VxLAN UDP port in the FW, and reset the
 * driver-side state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303088
/* Full teardown of adapter resources (inverse of be_setup): stop the
 * worker, clear VFs, undo VxLAN offloads, delete MACs, destroy the
 * interface and all queues, and release MSI-X.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3117
/* Create a FW interface for every VF.  On non-BE3 chips the capability
 * flags are taken from the VF's FW profile when one exists; otherwise a
 * default untagged/broadcast/multicast set is used.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3149
Sathya Perla39f1d942012-05-08 19:41:24 +00003150static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003151{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003152 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003153 int vf;
3154
Sathya Perla39f1d942012-05-08 19:41:24 +00003155 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3156 GFP_KERNEL);
3157 if (!adapter->vf_cfg)
3158 return -ENOMEM;
3159
Sathya Perla11ac75e2011-12-13 00:58:50 +00003160 for_all_vfs(adapter, vf_cfg, vf) {
3161 vf_cfg->if_handle = -1;
3162 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003163 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003164 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003165}
3166
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003167static int be_vf_setup(struct be_adapter *adapter)
3168{
Sathya Perla4c876612013-02-03 20:30:11 +00003169 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303170 struct be_vf_cfg *vf_cfg;
3171 int status, old_vfs, vf;
Sathya Perla04a06022013-07-23 15:25:00 +05303172 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003173
Sathya Perla257a3fe2013-06-14 15:54:51 +05303174 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003175
3176 status = be_vf_setup_init(adapter);
3177 if (status)
3178 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003179
Sathya Perla4c876612013-02-03 20:30:11 +00003180 if (old_vfs) {
3181 for_all_vfs(adapter, vf_cfg, vf) {
3182 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3183 if (status)
3184 goto err;
3185 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003186
Sathya Perla4c876612013-02-03 20:30:11 +00003187 status = be_vfs_mac_query(adapter);
3188 if (status)
3189 goto err;
3190 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303191 status = be_vfs_if_create(adapter);
3192 if (status)
3193 goto err;
3194
Sathya Perla39f1d942012-05-08 19:41:24 +00003195 status = be_vf_eth_addr_config(adapter);
3196 if (status)
3197 goto err;
3198 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003199
Sathya Perla11ac75e2011-12-13 00:58:50 +00003200 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303201 /* Allow VFs to programs MAC/VLAN filters */
3202 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3203 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3204 status = be_cmd_set_fn_privileges(adapter,
3205 privileges |
3206 BE_PRIV_FILTMGMT,
3207 vf + 1);
3208 if (!status)
3209 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3210 vf);
3211 }
3212
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303213 /* Allow full available bandwidth */
3214 if (!old_vfs)
3215 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003216
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303217 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303218 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303219 be_cmd_set_logical_link_config(adapter,
3220 IFLA_VF_LINK_STATE_AUTO,
3221 vf+1);
3222 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003223 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003224
3225 if (!old_vfs) {
3226 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3227 if (status) {
3228 dev_err(dev, "SRIOV enable failed\n");
3229 adapter->num_vfs = 0;
3230 goto err;
3231 }
3232 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003233 return 0;
3234err:
Sathya Perla4c876612013-02-03 20:30:11 +00003235 dev_err(dev, "VF setup failed\n");
3236 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003237 return status;
3238}
3239
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303240/* Converting function_mode bits on BE3 to SH mc_type enums */
3241
3242static u8 be_convert_mc_type(u32 function_mode)
3243{
Suresh Reddy66064db2014-06-23 16:41:29 +05303244 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303245 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303246 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303247 return FLEX10;
3248 else if (function_mode & VNIC_MODE)
3249 return vNIC2;
3250 else if (function_mode & UMC_ENABLED)
3251 return UMC;
3252 else
3253 return MC_NONE;
3254}
3255
Sathya Perla92bf14a2013-08-27 16:57:32 +05303256/* On BE2/BE3 FW does not suggest the supported limits */
3257static void BEx_get_resources(struct be_adapter *adapter,
3258 struct be_resources *res)
3259{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303260 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303261
3262 if (be_physfn(adapter))
3263 res->max_uc_mac = BE_UC_PMAC_COUNT;
3264 else
3265 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3266
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303267 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3268
3269 if (be_is_mc(adapter)) {
3270 /* Assuming that there are 4 channels per port,
3271 * when multi-channel is enabled
3272 */
3273 if (be_is_qnq_mode(adapter))
3274 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3275 else
3276 /* In a non-qnq multichannel mode, the pvid
3277 * takes up one vlan entry
3278 */
3279 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3280 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303281 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303282 }
3283
Sathya Perla92bf14a2013-08-27 16:57:32 +05303284 res->max_mcast_mac = BE_MAX_MC;
3285
Vasundhara Volama5243da2014-03-11 18:53:07 +05303286 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3287 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3288 * *only* if it is RSS-capable.
3289 */
3290 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3291 !be_physfn(adapter) || (be_is_mc(adapter) &&
3292 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
Sathya Perla92bf14a2013-08-27 16:57:32 +05303293 res->max_tx_qs = 1;
3294 else
3295 res->max_tx_qs = BE3_MAX_TX_QS;
3296
3297 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3298 !use_sriov && be_physfn(adapter))
3299 res->max_rss_qs = (adapter->be3_native) ?
3300 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3301 res->max_rx_qs = res->max_rss_qs + 1;
3302
Suresh Reddye3dc8672014-01-06 13:02:25 +05303303 if (be_physfn(adapter))
Suresh Reddyecf1f6e2014-03-11 18:53:03 +05303304 res->max_evt_qs = (res->max_vfs > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05303305 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3306 else
3307 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303308
3309 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3310 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3311 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3312}
3313
Sathya Perla30128032011-11-10 19:17:57 +00003314static void be_setup_init(struct be_adapter *adapter)
3315{
3316 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003317 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003318 adapter->if_handle = -1;
3319 adapter->be3_native = false;
3320 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003321 if (be_physfn(adapter))
3322 adapter->cmd_privileges = MAX_PRIVILEGES;
3323 else
3324 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003325}
3326
Vasundhara Volambec84e62014-06-30 13:01:32 +05303327static int be_get_sriov_config(struct be_adapter *adapter)
3328{
3329 struct device *dev = &adapter->pdev->dev;
3330 struct be_resources res = {0};
3331 int status, max_vfs, old_vfs;
3332
3333 status = be_cmd_get_profile_config(adapter, &res, 0);
3334 if (status)
3335 return status;
3336
3337 adapter->pool_res = res;
3338
3339 /* Some old versions of BE3 FW don't report max_vfs value */
3340 if (BE3_chip(adapter) && !res.max_vfs) {
3341 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3342 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3343 }
3344
3345 adapter->pool_res.max_vfs = res.max_vfs;
3346 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3347
3348 if (!be_max_vfs(adapter)) {
3349 if (num_vfs)
3350 dev_warn(dev, "device doesn't support SRIOV\n");
3351 adapter->num_vfs = 0;
3352 return 0;
3353 }
3354
3355 /* validate num_vfs module param */
3356 old_vfs = pci_num_vf(adapter->pdev);
3357 if (old_vfs) {
3358 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3359 if (old_vfs != num_vfs)
3360 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3361 adapter->num_vfs = old_vfs;
3362 } else {
3363 if (num_vfs > be_max_vfs(adapter)) {
3364 dev_info(dev, "Resources unavailable to init %d VFs\n",
3365 num_vfs);
3366 dev_info(dev, "Limiting to %d VFs\n",
3367 be_max_vfs(adapter));
3368 }
3369 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3370 }
3371
3372 return 0;
3373}
3374
Sathya Perla92bf14a2013-08-27 16:57:32 +05303375static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003376{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303377 struct device *dev = &adapter->pdev->dev;
3378 struct be_resources res = {0};
3379 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003380
Sathya Perla92bf14a2013-08-27 16:57:32 +05303381 if (BEx_chip(adapter)) {
3382 BEx_get_resources(adapter, &res);
3383 adapter->res = res;
3384 }
3385
Sathya Perla92bf14a2013-08-27 16:57:32 +05303386 /* For Lancer, SH etc read per-function resource limits from FW.
3387 * GET_FUNC_CONFIG returns per function guaranteed limits.
3388 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3389 */
Sathya Perla4c876612013-02-03 20:30:11 +00003390 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303391 status = be_cmd_get_func_config(adapter, &res);
3392 if (status)
3393 return status;
3394
3395 /* If RoCE may be enabled stash away half the EQs for RoCE */
3396 if (be_roce_supported(adapter))
3397 res.max_evt_qs /= 2;
3398 adapter->res = res;
3399
Sathya Perla92bf14a2013-08-27 16:57:32 +05303400 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3401 be_max_txqs(adapter), be_max_rxqs(adapter),
3402 be_max_rss(adapter), be_max_eqs(adapter),
3403 be_max_vfs(adapter));
3404 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3405 be_max_uc(adapter), be_max_mc(adapter),
3406 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003407 }
3408
Sathya Perla92bf14a2013-08-27 16:57:32 +05303409 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003410}
3411
Sathya Perla39f1d942012-05-08 19:41:24 +00003412static int be_get_config(struct be_adapter *adapter)
3413{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303414 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003415 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003416
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003417 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3418 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003419 &adapter->function_caps,
3420 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003421 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303422 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003423
Vasundhara Volam542963b2014-01-15 13:23:33 +05303424 if (be_physfn(adapter)) {
3425 status = be_cmd_get_active_profile(adapter, &profile_id);
3426 if (!status)
3427 dev_info(&adapter->pdev->dev,
3428 "Using profile 0x%x\n", profile_id);
Vasundhara Volambec84e62014-06-30 13:01:32 +05303429
3430 status = be_get_sriov_config(adapter);
3431 if (status)
3432 return status;
3433
3434 /* When the HW is in SRIOV capable configuration, the PF-pool
3435 * resources are equally distributed across the max-number of
3436 * VFs. The user may request only a subset of the max-vfs to be
3437 * enabled. Based on num_vfs, redistribute the resources across
3438 * num_vfs so that each VF will have access to more number of
3439 * resources. This facility is not available in BE3 FW.
3440 * Also, this is done by FW in Lancer chip.
3441 */
3442 if (!pci_num_vf(adapter->pdev)) {
3443 status = be_cmd_set_sriov_config(adapter,
3444 adapter->pool_res,
3445 adapter->num_vfs);
3446 if (status)
3447 return status;
3448 }
Vasundhara Volam542963b2014-01-15 13:23:33 +05303449 }
3450
Sathya Perla92bf14a2013-08-27 16:57:32 +05303451 status = be_get_resources(adapter);
3452 if (status)
3453 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003454
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05303455 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3456 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303457 if (!adapter->pmac_id)
3458 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003459
Sathya Perla92bf14a2013-08-27 16:57:32 +05303460 /* Sanitize cfg_num_qs based on HW and platform limits */
3461 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3462
3463 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003464}
3465
Sathya Perla95046b92013-07-23 15:25:02 +05303466static int be_mac_setup(struct be_adapter *adapter)
3467{
3468 u8 mac[ETH_ALEN];
3469 int status;
3470
3471 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3472 status = be_cmd_get_perm_mac(adapter, mac);
3473 if (status)
3474 return status;
3475
3476 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3477 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3478 } else {
3479 /* Maybe the HW was reset; dev_addr must be re-programmed */
3480 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3481 }
3482
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003483 /* For BE3-R VFs, the PF programs the initial MAC address */
3484 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3485 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3486 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303487 return 0;
3488}
3489
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303490static void be_schedule_worker(struct be_adapter *adapter)
3491{
3492 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3493 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3494}
3495
Sathya Perla77071332013-08-27 16:57:34 +05303496static int be_setup_queues(struct be_adapter *adapter)
3497{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303498 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303499 int status;
3500
3501 status = be_evt_queues_create(adapter);
3502 if (status)
3503 goto err;
3504
3505 status = be_tx_qs_create(adapter);
3506 if (status)
3507 goto err;
3508
3509 status = be_rx_cqs_create(adapter);
3510 if (status)
3511 goto err;
3512
3513 status = be_mcc_queues_create(adapter);
3514 if (status)
3515 goto err;
3516
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303517 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3518 if (status)
3519 goto err;
3520
3521 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3522 if (status)
3523 goto err;
3524
Sathya Perla77071332013-08-27 16:57:34 +05303525 return 0;
3526err:
3527 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3528 return status;
3529}
3530
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303531int be_update_queues(struct be_adapter *adapter)
3532{
3533 struct net_device *netdev = adapter->netdev;
3534 int status;
3535
3536 if (netif_running(netdev))
3537 be_close(netdev);
3538
3539 be_cancel_worker(adapter);
3540
3541 /* If any vectors have been shared with RoCE we cannot re-program
3542 * the MSIx table.
3543 */
3544 if (!adapter->num_msix_roce_vec)
3545 be_msix_disable(adapter);
3546
3547 be_clear_queues(adapter);
3548
3549 if (!msix_enabled(adapter)) {
3550 status = be_msix_enable(adapter);
3551 if (status)
3552 return status;
3553 }
3554
3555 status = be_setup_queues(adapter);
3556 if (status)
3557 return status;
3558
3559 be_schedule_worker(adapter);
3560
3561 if (netif_running(netdev))
3562 status = be_open(netdev);
3563
3564 return status;
3565}
3566
Sathya Perla5fb379e2009-06-18 00:02:59 +00003567static int be_setup(struct be_adapter *adapter)
3568{
Sathya Perla39f1d942012-05-08 19:41:24 +00003569 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303570 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003571 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003572
Sathya Perla30128032011-11-10 19:17:57 +00003573 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003574
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003575 if (!lancer_chip(adapter))
3576 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003577
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003578 status = be_get_config(adapter);
3579 if (status)
3580 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003581
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003582 status = be_msix_enable(adapter);
3583 if (status)
3584 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003585
Sathya Perla77071332013-08-27 16:57:34 +05303586 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3587 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3588 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3589 en_flags |= BE_IF_FLAGS_RSS;
3590 en_flags = en_flags & be_if_cap_flags(adapter);
3591 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3592 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003593 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003594 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003595
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303596 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3597 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303598 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303599 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003600 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003601 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003602
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003603 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003604
Sathya Perla95046b92013-07-23 15:25:02 +05303605 status = be_mac_setup(adapter);
3606 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003607 goto err;
3608
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003609 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003610
Somnath Koture9e2a902013-10-24 14:37:53 +05303611 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3612 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3613 adapter->fw_ver);
3614 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3615 }
3616
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003617 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003618 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003619
3620 be_set_rx_mode(adapter->netdev);
3621
Suresh Reddy76a9e082014-01-15 13:23:40 +05303622 be_cmd_get_acpi_wol_cap(adapter);
3623
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003624 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003625
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003626 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3627 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003628 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003629
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303630 if (be_physfn(adapter))
3631 be_cmd_set_logical_link_config(adapter,
3632 IFLA_VF_LINK_STATE_AUTO, 0);
3633
Vasundhara Volambec84e62014-06-30 13:01:32 +05303634 if (adapter->num_vfs)
3635 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003636
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003637 status = be_cmd_get_phy_info(adapter);
3638 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003639 adapter->phy.fc_autoneg = 1;
3640
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303641 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303642 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003643 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003644err:
3645 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003646 return status;
3647}
3648
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole entry point: with normal IRQ delivery unavailable,
 * kick every event queue and schedule its NAPI context so pending
 * completions get processed.  (Redundant trailing "return;" removed.)
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3664
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303665static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003666
Sathya Perla306f1342011-08-02 19:57:45 +00003667static bool phy_flashing_required(struct be_adapter *adapter)
3668{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003669 return (adapter->phy.phy_type == TN_8022 &&
3670 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003671}
3672
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003673static bool is_comp_in_ufi(struct be_adapter *adapter,
3674 struct flash_section_info *fsec, int type)
3675{
3676 int i = 0, img_type = 0;
3677 struct flash_section_info_g2 *fsec_g2 = NULL;
3678
Sathya Perlaca34fe32012-11-06 17:48:56 +00003679 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003680 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3681
3682 for (i = 0; i < MAX_FLASH_COMP; i++) {
3683 if (fsec_g2)
3684 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3685 else
3686 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3687
3688 if (img_type == type)
3689 return true;
3690 }
3691 return false;
3692
3693}
3694
Jingoo Han4188e7d2013-08-05 18:02:02 +09003695static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303696 int header_size,
3697 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003698{
3699 struct flash_section_info *fsec = NULL;
3700 const u8 *p = fw->data;
3701
3702 p += header_size;
3703 while (p < (fw->data + fw->size)) {
3704 fsec = (struct flash_section_info *)p;
3705 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3706 return fsec;
3707 p += 32;
3708 }
3709 return NULL;
3710}
3711
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303712static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3713 u32 img_offset, u32 img_size, int hdr_size,
3714 u16 img_optype, bool *crc_match)
3715{
3716 u32 crc_offset;
3717 int status;
3718 u8 crc[4];
3719
3720 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3721 if (status)
3722 return status;
3723
3724 crc_offset = hdr_size + img_offset + img_size - 4;
3725
3726 /* Skip flashing, if crc of flashed region matches */
3727 if (!memcmp(crc, p + crc_offset, 4))
3728 *crc_match = true;
3729 else
3730 *crc_match = false;
3731
3732 return status;
3733}
3734
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003735static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303736 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003737{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003738 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303739 u32 total_bytes, flash_op, num_bytes;
3740 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003741
3742 total_bytes = img_size;
3743 while (total_bytes) {
3744 num_bytes = min_t(u32, 32*1024, total_bytes);
3745
3746 total_bytes -= num_bytes;
3747
3748 if (!total_bytes) {
3749 if (optype == OPTYPE_PHY_FW)
3750 flash_op = FLASHROM_OPER_PHY_FLASH;
3751 else
3752 flash_op = FLASHROM_OPER_FLASH;
3753 } else {
3754 if (optype == OPTYPE_PHY_FW)
3755 flash_op = FLASHROM_OPER_PHY_SAVE;
3756 else
3757 flash_op = FLASHROM_OPER_SAVE;
3758 }
3759
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003760 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003761 img += num_bytes;
3762 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303763 flash_op, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05303764 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303765 optype == OPTYPE_PHY_FW)
3766 break;
3767 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003768 return status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003769 }
3770 return 0;
3771}
3772
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003773/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003774static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303775 const struct firmware *fw,
3776 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003777{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003778 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303779 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003780 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303781 int status, i, filehdr_size, num_comp;
3782 const struct flash_comp *pflashcomp;
3783 bool crc_match;
3784 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003785
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003786 struct flash_comp gen3_flash_types[] = {
3787 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3788 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3789 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3790 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3791 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3792 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3793 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3794 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3795 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3796 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3797 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3798 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3799 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3800 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3801 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3802 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3803 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3804 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3805 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3806 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003807 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003808
3809 struct flash_comp gen2_flash_types[] = {
3810 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3811 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3812 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3813 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3814 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3815 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3816 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3817 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3818 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3819 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3820 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3821 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3822 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3823 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3824 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3825 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003826 };
3827
Sathya Perlaca34fe32012-11-06 17:48:56 +00003828 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003829 pflashcomp = gen3_flash_types;
3830 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003831 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003832 } else {
3833 pflashcomp = gen2_flash_types;
3834 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003835 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003836 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003837
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003838 /* Get flash section info*/
3839 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3840 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303841 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003842 return -1;
3843 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003844 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003845 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003846 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003847
3848 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3849 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3850 continue;
3851
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003852 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3853 !phy_flashing_required(adapter))
3854 continue;
3855
3856 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303857 status = be_check_flash_crc(adapter, fw->data,
3858 pflashcomp[i].offset,
3859 pflashcomp[i].size,
3860 filehdr_size +
3861 img_hdrs_size,
3862 OPTYPE_REDBOOT, &crc_match);
3863 if (status) {
3864 dev_err(dev,
3865 "Could not get CRC for 0x%x region\n",
3866 pflashcomp[i].optype);
3867 continue;
3868 }
3869
3870 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003871 continue;
3872 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003873
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303874 p = fw->data + filehdr_size + pflashcomp[i].offset +
3875 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003876 if (p + pflashcomp[i].size > fw->data + fw->size)
3877 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003878
3879 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303880 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003881 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303882 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003883 pflashcomp[i].img_type);
3884 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003885 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003886 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003887 return 0;
3888}
3889
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303890static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3891{
3892 u32 img_type = le32_to_cpu(fsec_entry.type);
3893 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3894
3895 if (img_optype != 0xFFFF)
3896 return img_optype;
3897
3898 switch (img_type) {
3899 case IMAGE_FIRMWARE_iSCSI:
3900 img_optype = OPTYPE_ISCSI_ACTIVE;
3901 break;
3902 case IMAGE_BOOT_CODE:
3903 img_optype = OPTYPE_REDBOOT;
3904 break;
3905 case IMAGE_OPTION_ROM_ISCSI:
3906 img_optype = OPTYPE_BIOS;
3907 break;
3908 case IMAGE_OPTION_ROM_PXE:
3909 img_optype = OPTYPE_PXE_BIOS;
3910 break;
3911 case IMAGE_OPTION_ROM_FCoE:
3912 img_optype = OPTYPE_FCOE_BIOS;
3913 break;
3914 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3915 img_optype = OPTYPE_ISCSI_BACKUP;
3916 break;
3917 case IMAGE_NCSI:
3918 img_optype = OPTYPE_NCSI_FW;
3919 break;
3920 case IMAGE_FLASHISM_JUMPVECTOR:
3921 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3922 break;
3923 case IMAGE_FIRMWARE_PHY:
3924 img_optype = OPTYPE_SH_PHY_FW;
3925 break;
3926 case IMAGE_REDBOOT_DIR:
3927 img_optype = OPTYPE_REDBOOT_DIR;
3928 break;
3929 case IMAGE_REDBOOT_CONFIG:
3930 img_optype = OPTYPE_REDBOOT_CONFIG;
3931 break;
3932 case IMAGE_UFI_DIR:
3933 img_optype = OPTYPE_UFI_DIR;
3934 break;
3935 default:
3936 break;
3937 }
3938
3939 return img_optype;
3940}
3941
/* Flash a Skyhawk (UFI type-4) firmware image, section by section.
 * @adapter:       adapter being flashed
 * @fw:            firmware blob obtained via request_firmware()
 * @flash_cmd:     pre-allocated DMA buffer used for the flashrom commands
 * @num_of_images: number of image headers preceding the flash sections
 *
 * For each section listed in the flash_section_info table, the CRC of the
 * region already on flash is compared against the new image and the section
 * is flashed only when they differ (except for old-format images, which are
 * always flashed). Returns 0 on success or a negative value on failure;
 * -EAGAIN means the flash is partially written and a reboot is required.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* old-format entries have no optype (0xFFFF in all byte
		 * orders), so no swap is needed for this comparison
		 */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* Unknown section: skip it rather than flash blindly */
		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Section on flash already matches the new image */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds check: section must lie entirely inside the blob */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4023
/* Download a firmware image to a Lancer adapter.
 * The image is streamed to the FW's "/prg" object in 32KB chunks through a
 * single DMA buffer, then committed with a zero-length write. Depending on
 * the FW's response the adapter is either reset in place to activate the new
 * image or the user is told a reboot is required.
 * Returns 0 on success or a negative errno/FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command transfers whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; the FW reports how much it consumed each time */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually accepted, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset finalizes the object
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image as directed by the FW's change_status */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
4120
Sathya Perlaca34fe32012-11-06 17:48:56 +00004121#define UFI_TYPE2 2
4122#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004123#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004124#define UFI_TYPE4 4
4125static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004126 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004127{
4128 if (fhdr == NULL)
4129 goto be_get_ufi_exit;
4130
Sathya Perlaca34fe32012-11-06 17:48:56 +00004131 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4132 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004133 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4134 if (fhdr->asic_type_rev == 0x10)
4135 return UFI_TYPE3R;
4136 else
4137 return UFI_TYPE3;
4138 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004139 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004140
4141be_get_ufi_exit:
4142 dev_err(&adapter->pdev->dev,
4143 "UFI and Interface are not compatible for flashing\n");
4144 return -1;
4145}
4146
/* Download a firmware image to a BE2/BE3/Skyhawk adapter.
 * Determines the UFI image type from the file header, then walks the image
 * headers and invokes the generation-specific flash routine for the primary
 * image (imageid == 1). UFI_TYPE2 images carry no image headers and are
 * flashed directly after the loop. A single DMA buffer is reused for all
 * flashrom commands. Returns 0 on success or a negative value on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* -1 here means an incompatible image; it is reported as a failed
	 * status after the loop below
	 */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Type-2 (BE2) UFIs have no image headers; flash unconditionally */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4215
4216int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4217{
4218 const struct firmware *fw;
4219 int status;
4220
4221 if (!netif_running(adapter->netdev)) {
4222 dev_err(&adapter->pdev->dev,
4223 "Firmware load not allowed (interface is down)\n");
4224 return -1;
4225 }
4226
4227 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4228 if (status)
4229 goto fw_exit;
4230
4231 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4232
4233 if (lancer_chip(adapter))
4234 status = lancer_fw_download(adapter, fw);
4235 else
4236 status = be_fw_download(adapter, fw);
4237
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004238 if (!status)
4239 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4240 adapter->fw_on_flash);
4241
Ajit Khaparde84517482009-09-04 03:12:16 +00004242fw_exit:
4243 release_firmware(fw);
4244 return status;
4245}
4246
Sathya Perla748b5392014-05-09 13:29:13 +05304247static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004248{
4249 struct be_adapter *adapter = netdev_priv(dev);
4250 struct nlattr *attr, *br_spec;
4251 int rem;
4252 int status = 0;
4253 u16 mode = 0;
4254
4255 if (!sriov_enabled(adapter))
4256 return -EOPNOTSUPP;
4257
4258 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4259
4260 nla_for_each_nested(attr, br_spec, rem) {
4261 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4262 continue;
4263
4264 mode = nla_get_u16(attr);
4265 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4266 return -EINVAL;
4267
4268 status = be_cmd_set_hsw_config(adapter, 0, 0,
4269 adapter->if_handle,
4270 mode == BRIDGE_MODE_VEPA ?
4271 PORT_FWD_TYPE_VEPA :
4272 PORT_FWD_TYPE_VEB);
4273 if (status)
4274 goto err;
4275
4276 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4277 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4278
4279 return status;
4280 }
4281err:
4282 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4283 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4284
4285 return status;
4286}
4287
4288static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304289 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004290{
4291 struct be_adapter *adapter = netdev_priv(dev);
4292 int status = 0;
4293 u8 hsw_mode;
4294
4295 if (!sriov_enabled(adapter))
4296 return 0;
4297
4298 /* BE and Lancer chips support VEB mode only */
4299 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4300 hsw_mode = PORT_FWD_TYPE_VEB;
4301 } else {
4302 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4303 adapter->if_handle, &hsw_mode);
4304 if (status)
4305 return 0;
4306 }
4307
4308 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4309 hsw_mode == PORT_FWD_TYPE_VEPA ?
4310 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4311}
4312
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304313#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304314static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4315 __be16 port)
4316{
4317 struct be_adapter *adapter = netdev_priv(netdev);
4318 struct device *dev = &adapter->pdev->dev;
4319 int status;
4320
4321 if (lancer_chip(adapter) || BEx_chip(adapter))
4322 return;
4323
4324 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4325 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4326 be16_to_cpu(port));
4327 dev_info(dev,
4328 "Only one UDP port supported for VxLAN offloads\n");
4329 return;
4330 }
4331
4332 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4333 OP_CONVERT_NORMAL_TO_TUNNEL);
4334 if (status) {
4335 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4336 goto err;
4337 }
4338
4339 status = be_cmd_set_vxlan_port(adapter, port);
4340 if (status) {
4341 dev_warn(dev, "Failed to add VxLAN port\n");
4342 goto err;
4343 }
4344 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4345 adapter->vxlan_port = port;
4346
4347 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4348 be16_to_cpu(port));
4349 return;
4350err:
4351 be_disable_vxlan_offloads(adapter);
4352 return;
4353}
4354
4355static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4356 __be16 port)
4357{
4358 struct be_adapter *adapter = netdev_priv(netdev);
4359
4360 if (lancer_chip(adapter) || BEx_chip(adapter))
4361 return;
4362
4363 if (adapter->vxlan_port != port)
4364 return;
4365
4366 be_disable_vxlan_offloads(adapter);
4367
4368 dev_info(&adapter->pdev->dev,
4369 "Disabled VxLAN offloads for UDP port %d\n",
4370 be16_to_cpu(port));
4371}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304372#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304373
/* Net-device callbacks implemented by this driver. SR-IOV VF management,
 * netpoll, busy-poll and VxLAN hooks are compiled in conditionally.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};
4403
/* Initialize netdev feature flags, ops and ethtool ops for this adapter.
 * Called once during probe, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Only Skyhawk-class chips can offload checksums/TSO for
	 * VxLAN-encapsulated (UDP tunnel) traffic
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RSS hashing is advertised only when multiple RX rings exist */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX stripping/filtering are always on (not user-togglable),
	 * hence in features but not hw_features
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so that the segment plus Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4436
4437static void be_unmap_pci_bars(struct be_adapter *adapter)
4438{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004439 if (adapter->csr)
4440 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004441 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004442 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004443}
4444
/* Return the PCI BAR number that exposes the doorbell region:
 * BAR 0 on Lancer and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4452
4453static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004454{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004455 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004456 adapter->roce_db.size = 4096;
4457 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4458 db_bar(adapter));
4459 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4460 db_bar(adapter));
4461 }
Parav Pandit045508a2012-03-26 14:27:13 +00004462 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004463}
4464
4465static int be_map_pci_bars(struct be_adapter *adapter)
4466{
4467 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004468
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004469 if (BEx_chip(adapter) && be_physfn(adapter)) {
4470 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4471 if (adapter->csr == NULL)
4472 return -ENOMEM;
4473 }
4474
Sathya Perlace66f782012-11-06 17:48:58 +00004475 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004476 if (addr == NULL)
4477 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004478 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004479
4480 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004481 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004482
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004483pci_map_err:
4484 be_unmap_pci_bars(adapter);
4485 return -ENOMEM;
4486}
4487
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004488static void be_ctrl_cleanup(struct be_adapter *adapter)
4489{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004490 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004491
4492 be_unmap_pci_bars(adapter);
4493
4494 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004495 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4496 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004497
Sathya Perla5b8821b2011-08-02 19:57:44 +00004498 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004499 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004500 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4501 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004502}
4503
/* One-time control-path init: reads the SLI interface register, maps the
 * PCI BARs and allocates the DMA buffers used for mailbox and rx-filter
 * commands, then initializes the command locks. Cleans up already-acquired
 * resources on failure (goto-unwind). Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify the SLI family and whether this function is a VF */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space so it can be restored after an EEH reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4562
4563static void be_stats_cleanup(struct be_adapter *adapter)
4564{
Sathya Perla3abcded2010-10-03 22:12:27 -07004565 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004566
4567 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004568 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4569 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004570}
4571
4572static int be_stats_init(struct be_adapter *adapter)
4573{
Sathya Perla3abcded2010-10-03 22:12:27 -07004574 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004575
Sathya Perlaca34fe32012-11-06 17:48:56 +00004576 if (lancer_chip(adapter))
4577 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4578 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004579 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004580 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004581 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004582 else
4583 /* ALL non-BE ASICs */
4584 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004585
Joe Perchesede23fa2013-08-26 22:45:23 -07004586 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4587 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004588 if (cmd->va == NULL)
4589 return -1;
4590 return 0;
4591}
4592
/* PCI remove callback: tears down the adapter in the reverse order of
 * probe. The ordering matters: RoCE and interrupts first, then the error
 * recovery worker, then netdev unregistration before freeing resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before pulling resources from under it */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4623
Sathya Perla39f1d942012-05-08 19:41:24 +00004624static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004625{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304626 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004627
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004628 status = be_cmd_get_cntl_attributes(adapter);
4629 if (status)
4630 return status;
4631
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004632 /* Must be a power of 2 or else MODULO will BUG_ON */
4633 adapter->be_get_temp_freq = 64;
4634
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304635 if (BEx_chip(adapter)) {
4636 level = be_cmd_get_fw_log_level(adapter);
4637 adapter->msg_enable =
4638 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4639 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004640
Sathya Perla92bf14a2013-08-27 16:57:32 +05304641 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004642 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004643}
4644
/* Attempt to recover a Lancer adapter after a HW error: wait for the FW
 * to report ready, tear down the function, clear the recorded error state
 * and bring the function (and, if it was running, the interface) back up.
 * Returns 0 on success; -EAGAIN means the FW is still provisioning
 * resources and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Forget the error that triggered recovery before re-setup */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4681
/* Periodic (1 sec) work item: poll for adapter errors and, on Lancer
 * chips, drive the recovery sequence when a HW error has been latched.
 * Reschedules itself unless recovery failed with a fatal error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the netdev
		 * while the queues are torn down and rebuilt.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4708
/* Periodic (1 sec) housekeeping: reap MCC completions while interrupts
 * are not yet in use, kick off async stats queries, read die
 * temperature on the PF, replenish starved RX queues and update EQ
 * delay (interrupt moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a fresh stats cmd only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Query temperature only every be_get_temp_freq iterations */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4751
Sathya Perla257a3fe2013-06-14 15:54:51 +05304752/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004753static bool be_reset_required(struct be_adapter *adapter)
4754{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304755 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004756}
4757
Sathya Perlad3791422012-09-28 04:39:44 +00004758static char *mc_name(struct be_adapter *adapter)
4759{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304760 char *str = ""; /* default */
4761
4762 switch (adapter->mc_type) {
4763 case UMC:
4764 str = "UMC";
4765 break;
4766 case FLEX10:
4767 str = "FLEX10";
4768 break;
4769 case vNIC1:
4770 str = "vNIC-1";
4771 break;
4772 case nPAR:
4773 str = "nPAR";
4774 break;
4775 case UFP:
4776 str = "UFP";
4777 break;
4778 case vNIC2:
4779 str = "vNIC-2";
4780 break;
4781 default:
4782 str = "";
4783 }
4784
4785 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004786}
4787
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4792
/* PCI probe entry point.  Brings up one NIC function: enables the PCI
 * device, maps BARs and sets the DMA mask, syncs with FW ready state,
 * optionally FLRs the function, creates queues via be_setup() and
 * registers the netdev.  On any failure the goto ladder unwinds
 * exactly what has been set up so far.
 * Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit when not supported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled on it */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	/* Expose this function to the RoCE driver, if present */
	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4914
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce the
 * interface and recovery worker, tear down queues, then power the PCI
 * device down into the requested state.  Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4939
/* Legacy PM resume callback: re-enable the PCI device, wait for FW,
 * rebuild the adapter (be_setup), re-open the interface if it was
 * running, restart the recovery worker and disarm wake-on-LAN.
 * Returns 0 on success or a negative errno from an early step.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() status is not checked here,
	 * unlike in probe — presumably best-effort on resume; confirm.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4981
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be absent if probe never completed */
	if (!adapter)
		return;

	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* FLR the function so no DMA continues past shutdown */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5001
/* EEH/AER error-detected callback: on first notification, latch
 * eeh_error, stop the recovery worker and tear the adapter down.
 * Returns DISCONNECT for permanent failures, otherwise NEED_RESET so
 * the core proceeds to be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5040
/* EEH slot-reset callback: re-enable the device after the slot reset,
 * restore PCI state and wait for FW readiness.  Clears the latched
 * error flags on success.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5067
/* EEH resume callback: final recovery step after a successful slot
 * reset.  FLRs the function, re-inits FW communication, rebuilds the
 * adapter, re-opens the interface and restarts the recovery worker.
 * On any failure only an error is logged; the device stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5110
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5116
/* PCI driver registration: entry points for probe/remove, legacy
 * power management, shutdown and EEH error recovery.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5127
5128static int __init be_init_module(void)
5129{
Joe Perches8e95a202009-12-03 07:58:21 +00005130 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5131 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005132 printk(KERN_WARNING DRV_NAME
5133 " : Module param rx_frag_size must be 2048/4096/8192."
5134 " Using 2048\n");
5135 rx_frag_size = 2048;
5136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005137
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005138 return pci_register_driver(&be_driver);
5139}
5140module_init(be_init_module);
5141
/* Module exit: unregister the PCI driver (triggers be_remove per dev) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);