blob: fc44bb331717e76bcdf11b59b1daf0c6a6cdf1cf [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla11ac75e2011-12-13 00:58:50 +000037static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Sathya Perla6b7c5b92009-03-11 23:32:03 -070041static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* One name per bit of the Unrecoverable Error status low register;
 * index == bit position.  Used when logging which HW block raised a UE.
 * NOTE(review): trailing spaces inside some strings are preserved as-is;
 * they are part of the original log formatting.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit names for the Unrecoverable Error status high register;
 * index == bit position.  Undocumented bits are reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000128 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 mem->va = NULL;
132 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 return 0;
149}
150
Somnath Kotur68c45a22013-03-14 02:42:07 +0000151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168}
169
Somnath Kotur68c45a22013-03-14 02:42:07 +0000170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
/* Rings the RX-queue doorbell: tells HW that 'posted' new receive buffers
 * have been added to queue 'qid'.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Ensure descriptor writes reach memory before the doorbell fires */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
195
/* Rings the TX-queue doorbell: reports 'posted' new work-request entries
 * on txo's queue to the HW.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Ensure WRB writes reach memory before the doorbell fires */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
206
/* Notifies HW of 'num_popped' consumed event-queue entries on EQ 'qid',
 * optionally re-arming the EQ and/or clearing its interrupt.
 * Skipped entirely after an EEH error (device must not be touched).
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* High-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Notifies HW of 'num_popped' consumed completion-queue entries on CQ 'qid',
 * optionally re-arming the CQ.  Skipped after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* High-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* ndo_set_mac_address handler.
 * Programs the new MAC into the FW (PMAC_ADD), deletes the old PMAC entry,
 * and then confirms via a FW query that the new MAC actually became active
 * before committing it to netdev->dev_addr.  This confirm step is needed
 * because on VFs the add command may "fail OK" (see comment below).
 * Returns 0 on success or a negative errno (-EADDRNOTAVAIL, -EPERM, or a
 * FW-command error).
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copies the v0-layout (BE2) HW stats from the cmd response buffer into the
 * driver's unified drv_stats, after byte-swapping the buffer to CPU order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps per-port jabber counts in separate rxf fields */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copies the v1-layout (BE3) HW stats from the cmd response buffer into the
 * driver's unified drv_stats, after byte-swapping the buffer to CPU order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copies the v2-layout (post-BE3 chips) HW stats from the cmd response
 * buffer into the driver's unified drv_stats, after byte-swapping to CPU
 * order.  v2 additionally carries RoCE counters on RoCE-capable adapters.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters exist only in the v2 layout on RoCE-capable HW */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copies Lancer per-physical-port stats from the cmd response buffer into
 * the driver's unified drv_stats, after byte-swapping to CPU order.
 * Lancer uses a pport_stats layout distinct from the BEx hw_stats versions.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer reports one fifo-overflow counter; it feeds both of the
	 * driver's overflow fields (input and rxpp) below.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000527
Sathya Perla09c1c682011-08-22 19:41:53 +0000528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
Jingoo Han4188e7d2013-08-05 18:02:02 +0900540static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
/* Parses the raw GET_STATS cmd response into adapter->drv_stats, dispatching
 * on chip family (Lancer vs. BE2/BE3/later), then updates the per-RXQ
 * no-fragment drop counters for non-Lancer chips.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
579
/* ndo_get_stats64 handler: aggregates per-RXQ/TXQ packet and byte counters
 * (read consistently via u64_stats seqcount retry loops) and the driver's
 * cached error counters into 'stats'.  Returns 'stats' as required by the
 * ndo contract.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop gives a consistent 64-bit snapshot on 32-bit CPUs */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648 struct net_device *netdev = adapter->netdev;
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000651 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530655 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659}
660
Sathya Perla3c8def92011-06-12 20:01:58 +0000661static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663{
Sathya Perla3c8def92011-06-12 20:01:58 +0000664 struct be_tx_stats *stats = tx_stats(txo);
665
Sathya Perlaab1594e2011-07-25 19:10:15 +0000666 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 /* to account for hdr wrb */
685 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000702 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703}
704
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
Somnath Koturcc4ce022010-10-21 07:11:14 -0700721static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000722 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700723{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000724 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700725
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700726 memset(hdr, 0, sizeof(*hdr));
727
728 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
729
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000730 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700731 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
732 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
733 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000734 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000735 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
737 if (is_tcp_pkt(skb))
738 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
739 else if (is_udp_pkt(skb))
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
741 }
742
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700743 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700746 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700747 }
748
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000749 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
750 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700752 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
753 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
754}
755
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000764 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000765 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000770 }
771}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772
Sathya Perla3c8def92011-06-12 20:01:58 +0000773static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000774 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
775 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776{
Sathya Perla7101e112010-03-22 20:41:12 +0000777 dma_addr_t busaddr;
778 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000779 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781 struct be_eth_wrb *wrb;
782 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000783 bool map_single = false;
784 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700785
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700786 hdr = queue_head_node(txq);
787 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000788 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700789
David S. Millerebc8d2a2009-06-09 01:01:31 -0700790 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700791 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000792 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
793 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000794 goto dma_err;
795 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700796 wrb = queue_head_node(txq);
797 wrb_fill(wrb, busaddr, len);
798 be_dws_cpu_to_le(wrb, sizeof(*wrb));
799 queue_head_inc(txq);
800 copied += len;
801 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802
David S. Millerebc8d2a2009-06-09 01:01:31 -0700803 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000804 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700805 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000806 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000807 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000808 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000809 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700810 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000811 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700812 be_dws_cpu_to_le(wrb, sizeof(*wrb));
813 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000814 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700815 }
816
817 if (dummy_wrb) {
818 wrb = queue_head_node(txq);
819 wrb_fill(wrb, 0, 0);
820 be_dws_cpu_to_le(wrb, sizeof(*wrb));
821 queue_head_inc(txq);
822 }
823
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000824 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825 be_dws_cpu_to_le(hdr, sizeof(*hdr));
826
827 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000828dma_err:
829 txq->head = map_head;
830 while (copied) {
831 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000832 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000833 map_single = false;
834 copied -= wrb->frag_len;
835 queue_head_inc(txq);
836 }
837 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838}
839
Somnath Kotur93040ae2012-06-26 22:32:10 +0000840static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000841 struct sk_buff *skb,
842 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000843{
844 u16 vlan_tag = 0;
845
846 skb = skb_share_check(skb, GFP_ATOMIC);
847 if (unlikely(!skb))
848 return skb;
849
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000850 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000851 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530852
853 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
854 if (!vlan_tag)
855 vlan_tag = adapter->pvid;
856 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
857 * skip VLAN insertion
858 */
859 if (skip_hw_vlan)
860 *skip_hw_vlan = true;
861 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000862
863 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400864 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000865 if (unlikely(!skb))
866 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000867 skb->vlan_tci = 0;
868 }
869
870 /* Insert the outer VLAN, if any */
871 if (adapter->qnq_vid) {
872 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400873 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000874 if (unlikely(!skb))
875 return skb;
876 if (skip_hw_vlan)
877 *skip_hw_vlan = true;
878 }
879
Somnath Kotur93040ae2012-06-26 22:32:10 +0000880 return skb;
881}
882
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
Sathya Perlaee9c7992013-05-22 23:04:55 +0000910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000914}
915
Vasundhara Volamec495fa2014-03-03 14:25:38 +0530916static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
917 struct sk_buff *skb,
918 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700919{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000920 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000921 unsigned int eth_hdr_len;
922 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000923
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000924 /* For padded packets, BE HW modifies tot_len field in IP header
925 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000926 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000927 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000928 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
929 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000930 if (skb->len <= 60 &&
931 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000932 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000933 ip = (struct iphdr *)ip_hdr(skb);
934 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
935 }
936
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000937 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530938 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000939 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530940 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000941 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000942 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000943
Somnath Kotur93040ae2012-06-26 22:32:10 +0000944 /* HW has a bug wherein it will calculate CSUM for VLAN
945 * pkts even though it is disabled.
946 * Manually insert VLAN in pkt.
947 */
948 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000949 vlan_tx_tag_present(skb)) {
950 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000951 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +0530952 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000953 }
954
955 /* HW may lockup when VLAN HW tagging is requested on
956 * certain ipv6 packets. Drop such pkts if the HW workaround to
957 * skip HW tagging is not enabled by FW.
958 */
959 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000960 (adapter->pvid || adapter->qnq_vid) &&
961 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000962 goto tx_drop;
963
964 /* Manual VLAN tag insertion to prevent:
965 * ASIC lockup when the ASIC inserts VLAN tag into
966 * certain ipv6 packets. Insert VLAN tags in driver,
967 * and set event, completion, vlan bits accordingly
968 * in the Tx WRB.
969 */
970 if (be_ipv6_tx_stall_chk(adapter, skb) &&
971 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000972 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000973 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +0530974 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000975 }
976
Sathya Perlaee9c7992013-05-22 23:04:55 +0000977 return skb;
978tx_drop:
979 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +0530980err:
Sathya Perlaee9c7992013-05-22 23:04:55 +0000981 return NULL;
982}
983
Vasundhara Volamec495fa2014-03-03 14:25:38 +0530984static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
985 struct sk_buff *skb,
986 bool *skip_hw_vlan)
987{
988 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
989 * less may cause a transmit stall on that port. So the work-around is
990 * to pad short packets (<= 32 bytes) to a 36-byte length.
991 */
992 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
993 if (skb_padto(skb, 36))
994 return NULL;
995 skb->len = 36;
996 }
997
998 if (BEx_chip(adapter) || lancer_chip(adapter)) {
999 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1000 if (!skb)
1001 return NULL;
1002 }
1003
1004 return skb;
1005}
1006
Sathya Perlaee9c7992013-05-22 23:04:55 +00001007static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1008{
1009 struct be_adapter *adapter = netdev_priv(netdev);
1010 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1011 struct be_queue_info *txq = &txo->q;
1012 bool dummy_wrb, stopped = false;
1013 u32 wrb_cnt = 0, copied = 0;
1014 bool skip_hw_vlan = false;
1015 u32 start = txq->head;
1016
1017 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
Sathya Perlabc617522013-10-01 16:00:01 +05301018 if (!skb) {
1019 tx_stats(txo)->tx_drv_drops++;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001020 return NETDEV_TX_OK;
Sathya Perlabc617522013-10-01 16:00:01 +05301021 }
Sathya Perlaee9c7992013-05-22 23:04:55 +00001022
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001023 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001024
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001025 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1026 skip_hw_vlan);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001027 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001028 int gso_segs = skb_shinfo(skb)->gso_segs;
1029
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001030 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +00001031 BUG_ON(txo->sent_skb_list[start]);
1032 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001033
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001034 /* Ensure txq has space for the next skb; Else stop the queue
1035 * *BEFORE* ringing the tx doorbell, so that we serialze the
1036 * tx compls of the current transmit which'll wake up the queue
1037 */
Sathya Perla7101e112010-03-22 20:41:12 +00001038 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001039 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1040 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +00001041 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001042 stopped = true;
1043 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001044
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001045 be_txq_notify(adapter, txo, wrb_cnt);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001046
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001047 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001048 } else {
1049 txq->head = start;
Sathya Perlabc617522013-10-01 16:00:01 +05301050 tx_stats(txo)->tx_drv_drops++;
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001051 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053 return NETDEV_TX_OK;
1054}
1055
1056static int be_change_mtu(struct net_device *netdev, int new_mtu)
1057{
1058 struct be_adapter *adapter = netdev_priv(netdev);
1059 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001060 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1061 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062 dev_info(&adapter->pdev->dev,
1063 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001064 BE_MIN_MTU,
1065 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001066 return -EINVAL;
1067 }
1068 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1069 netdev->mtu, new_mtu);
1070 netdev->mtu = new_mtu;
1071 return 0;
1072}
1073
1074/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001075 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1076 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 */
Sathya Perla10329df2012-06-05 19:37:18 +00001078static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001079{
Sathya Perla10329df2012-06-05 19:37:18 +00001080 u16 vids[BE_NUM_VLANS_SUPPORTED];
1081 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001082 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001083
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001084 /* No need to further configure vids if in promiscuous mode */
1085 if (adapter->promiscuous)
1086 return 0;
1087
Sathya Perla92bf14a2013-08-27 16:57:32 +05301088 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001089 goto set_vlan_promisc;
1090
1091 /* Construct VLAN Table to give to HW */
1092 for (i = 0; i < VLAN_N_VID; i++)
1093 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +00001094 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001095
1096 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Ajit Khaparde012bd382013-11-18 10:44:24 -06001097 vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001098
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001099 if (status) {
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001100 /* Set to VLAN promisc mode as setting VLAN filter failed */
1101 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1102 goto set_vlan_promisc;
1103 dev_err(&adapter->pdev->dev,
1104 "Setting HW VLAN filtering failed.\n");
1105 } else {
1106 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1107 /* hw VLAN filtering re-enabled. */
1108 status = be_cmd_rx_filter(adapter,
1109 BE_FLAGS_VLAN_PROMISC, OFF);
1110 if (!status) {
1111 dev_info(&adapter->pdev->dev,
1112 "Disabling VLAN Promiscuous mode.\n");
1113 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001114 }
1115 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001117
Sathya Perlab31c50a2009-09-17 10:30:13 -07001118 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001119
1120set_vlan_promisc:
Somnath Kotura6b74e02014-01-21 15:50:55 +05301121 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1122 return 0;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001123
1124 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1125 if (!status) {
1126 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001127 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1128 } else
1129 dev_err(&adapter->pdev->dev,
1130 "Failed to enable VLAN Promiscuous mode.\n");
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001131 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001132}
1133
Patrick McHardy80d5c362013-04-19 02:04:28 +00001134static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001135{
1136 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001137 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001139 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0)
1141 goto ret;
1142
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001143 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301144 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001145
Somnath Kotura6b74e02014-01-21 15:50:55 +05301146 status = be_vid_config(adapter);
1147 if (status) {
1148 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001149 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301150 }
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001151ret:
1152 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153}
1154
Patrick McHardy80d5c362013-04-19 02:04:28 +00001155static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001156{
1157 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001158 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001160 /* Packets with VID 0 are always received by Lancer by default */
1161 if (lancer_chip(adapter) && vid == 0)
1162 goto ret;
1163
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301165 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001166 if (!status)
1167 adapter->vlans_added--;
1168 else
1169 adapter->vlan_tag[vid] = 1;
1170ret:
1171 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172}
1173
Somnath kotur7ad09452014-03-03 14:24:43 +05301174static void be_clear_promisc(struct be_adapter *adapter)
1175{
1176 adapter->promiscuous = false;
1177 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1178
1179 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1180}
1181
Sathya Perlaa54769f2011-10-24 02:45:00 +00001182static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183{
1184 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001185 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001186
1187 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001188 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001189 adapter->promiscuous = true;
1190 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001191 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001192
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001193 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001194 if (adapter->promiscuous) {
Somnath kotur7ad09452014-03-03 14:24:43 +05301195 be_clear_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001196 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001197 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001198 }
1199
Sathya Perlae7b909a2009-11-22 22:01:10 +00001200 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001201 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla92bf14a2013-08-27 16:57:32 +05301202 netdev_mc_count(netdev) > be_max_mc(adapter)) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001203 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001204 goto done;
1205 }
1206
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001207 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1208 struct netdev_hw_addr *ha;
1209 int i = 1; /* First slot is claimed by the Primary MAC */
1210
1211 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1212 be_cmd_pmac_del(adapter, adapter->if_handle,
1213 adapter->pmac_id[i], 0);
1214 }
1215
Sathya Perla92bf14a2013-08-27 16:57:32 +05301216 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001217 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1218 adapter->promiscuous = true;
1219 goto done;
1220 }
1221
1222 netdev_for_each_uc_addr(ha, adapter->netdev) {
1223 adapter->uc_macs++; /* First slot is for Primary MAC */
1224 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1225 adapter->if_handle,
1226 &adapter->pmac_id[adapter->uc_macs], 0);
1227 }
1228 }
1229
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001230 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1231
1232 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1233 if (status) {
1234 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1235 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1236 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1237 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001238done:
1239 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001240}
1241
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001242static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1243{
1244 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001245 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001246 int status;
1247
Sathya Perla11ac75e2011-12-13 00:58:50 +00001248 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001249 return -EPERM;
1250
Sathya Perla11ac75e2011-12-13 00:58:50 +00001251 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001252 return -EINVAL;
1253
Sathya Perla3175d8c2013-07-23 15:25:03 +05301254 if (BEx_chip(adapter)) {
1255 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1256 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001257
Sathya Perla11ac75e2011-12-13 00:58:50 +00001258 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1259 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301260 } else {
1261 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1262 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001263 }
1264
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001265 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001266 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1267 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001268 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001269 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001270
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001271 return status;
1272}
1273
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001274static int be_get_vf_config(struct net_device *netdev, int vf,
1275 struct ifla_vf_info *vi)
1276{
1277 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001278 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001279
Sathya Perla11ac75e2011-12-13 00:58:50 +00001280 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001281 return -EPERM;
1282
Sathya Perla11ac75e2011-12-13 00:58:50 +00001283 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001284 return -EINVAL;
1285
1286 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001287 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001288 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1289 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001290 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301291 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001292
1293 return 0;
1294}
1295
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001296static int be_set_vf_vlan(struct net_device *netdev,
1297 int vf, u16 vlan, u8 qos)
1298{
1299 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001300 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001301 int status = 0;
1302
Sathya Perla11ac75e2011-12-13 00:58:50 +00001303 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001304 return -EPERM;
1305
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001306 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001307 return -EINVAL;
1308
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001309 if (vlan || qos) {
1310 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301311 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001312 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1313 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001314 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001315 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301316 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1317 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001318 }
1319
Somnath Koturc5022242014-03-03 14:24:20 +05301320 if (!status)
1321 vf_cfg->vlan_tag = vlan;
1322 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001323 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301324 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001325 return status;
1326}
1327
Ajit Khapardee1d18732010-07-23 01:52:13 +00001328static int be_set_vf_tx_rate(struct net_device *netdev,
1329 int vf, int rate)
1330{
1331 struct be_adapter *adapter = netdev_priv(netdev);
1332 int status = 0;
1333
Sathya Perla11ac75e2011-12-13 00:58:50 +00001334 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001335 return -EPERM;
1336
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001337 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001338 return -EINVAL;
1339
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001340 if (rate < 100 || rate > 10000) {
1341 dev_err(&adapter->pdev->dev,
1342 "tx rate must be between 100 and 10000 Mbps\n");
1343 return -EINVAL;
1344 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001345
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001346 if (lancer_chip(adapter))
1347 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1348 else
1349 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001350
1351 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001352 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001353 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001354 else
1355 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001356 return status;
1357}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301358static int be_set_vf_link_state(struct net_device *netdev, int vf,
1359 int link_state)
1360{
1361 struct be_adapter *adapter = netdev_priv(netdev);
1362 int status;
1363
1364 if (!sriov_enabled(adapter))
1365 return -EPERM;
1366
1367 if (vf >= adapter->num_vfs)
1368 return -EINVAL;
1369
1370 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1371 if (!status)
1372 adapter->vf_cfg[vf].plink_tracking = link_state;
1373
1374 return status;
1375}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001376
Sathya Perla2632baf2013-10-01 16:00:00 +05301377static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1378 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379{
Sathya Perla2632baf2013-10-01 16:00:00 +05301380 aic->rx_pkts_prev = rx_pkts;
1381 aic->tx_reqs_prev = tx_pkts;
1382 aic->jiffies = now;
1383}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001384
/* Adaptive interrupt coalescing: for every event queue, derive a new
 * EQ delay (eqd) from the rx+tx packet rate observed since the last run
 * and push all changed delays to the FW in one be_cmd_modify_eqd() call.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled for this EQ: clear the baseline and
			 * fall back to the fixed delay in et_eqd
			 * (presumably the ethtool-configured value — confirm).
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Consistent 64-bit snapshot of rx_pkts via the stats
		 * seqcount; retry if a writer raced with us.
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		/* Same snapshot dance for the TX request count */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second over the elapsed delta */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		/* Heuristic mapping from pps to delay, then clamp to the
		 * per-EQ [min_eqd, max_eqd] range; very low rates get 0
		 * (no coalescing) for latency.
		 */
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only when the delay actually changed.
		 * NOTE(review): the (eqd * 65)/100 scaling converts eqd to
		 * the FW's delay-multiplier units — confirm against FW spec.
		 */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all modified EQs into a single FW command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1451
Sathya Perla3abcded2010-10-03 22:12:27 -07001452static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001453 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001454{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001455 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001456
Sathya Perlaab1594e2011-07-25 19:10:15 +00001457 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001458 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001459 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001460 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001461 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001462 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001463 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001464 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001465 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466}
1467
Sathya Perla2e588f82011-03-11 02:49:26 +00001468static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001469{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001470 /* L4 checksum is not reliable for non TCP/UDP packets.
1471 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001472 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1473 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001474}
1475
/* Consume the RX buffer at the queue tail and return its page-info.
 * Undoes the DMA mapping set up in be_post_rx_frags(): the frag that was
 * marked last_frag owns the mapping of the whole big page and is fully
 * unmapped; any other frag is only synced back to the CPU.
 * Advances rxq->tail and drops rxq->used by one.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted buffer must have a page attached */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the big page: tear down the page mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Mid-page frag: the mapping stays live for later frags,
		 * just make this frag's bytes visible to the CPU.
		 */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1501
1502/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001503static void be_rx_compl_discard(struct be_rx_obj *rxo,
1504 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001505{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001507 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001508
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001509 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301510 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001511 put_page(page_info->page);
1512 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513 }
1514}
1515
1516/*
1517 * skb_fill_rx_data forms a complete skb for an ether frame
1518 * indicated by rxcp.
1519 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001520static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1521 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001522{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001524 u16 i, j;
1525 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526 u8 *start;
1527
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301528 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529 start = page_address(page_info->page) + page_info->page_offset;
1530 prefetch(start);
1531
1532 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001533 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001534
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001535 skb->len = curr_frag_len;
1536 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001537 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538 /* Complete packet has now been moved to data */
1539 put_page(page_info->page);
1540 skb->data_len = 0;
1541 skb->tail += curr_frag_len;
1542 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001543 hdr_len = ETH_HLEN;
1544 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001546 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547 skb_shinfo(skb)->frags[0].page_offset =
1548 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001549 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001551 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552 skb->tail += hdr_len;
1553 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001554 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001555
Sathya Perla2e588f82011-03-11 02:49:26 +00001556 if (rxcp->pkt_size <= rx_frag_size) {
1557 BUG_ON(rxcp->num_rcvd != 1);
1558 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001559 }
1560
1561 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001562 remaining = rxcp->pkt_size - curr_frag_len;
1563 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301564 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001565 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001567 /* Coalesce all frags from the same physical page in one slot */
1568 if (page_info->page_offset == 0) {
1569 /* Fresh page */
1570 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001571 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001572 skb_shinfo(skb)->frags[j].page_offset =
1573 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001574 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001575 skb_shinfo(skb)->nr_frags++;
1576 } else {
1577 put_page(page_info->page);
1578 }
1579
Eric Dumazet9e903e02011-10-18 21:00:24 +00001580 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 skb->len += curr_frag_len;
1582 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001583 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001584 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001585 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001587 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001588}
1589
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * build a normal skb, set checksum/hash/vlan metadata and hand it to
 * the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: the HW buffers of this completion must still be
		 * reclaimed, so drop them.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the device offers RXCSUM and
	 * the completion's verdict is usable (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1623
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the HW fragments to a napi frag-skb (coalescing frags from the
 * same big page into one slot) and feed it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;	/* i: HW frag index; j: skb frag slot (starts at -1) */

	skb = napi_get_frags(napi);
	if (!skb) {
		/* Still must reclaim the buffers backing this completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: slot j already pins it; drop the extra
			 * reference taken at posting time.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose checksum the HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1679
/* Decode a v1 RX completion entry (used when adapter->be3_native is set,
 * see be_rx_compl_get()) into the chip-independent be_rx_compl_info.
 * The vlan fields are extracted only when the vtp (vlan-tagged) bit is set.
 * NOTE(review): unlike the v0 parser, ip_frag is not extracted here —
 * confirm the v1 completion has no such field / the workaround in
 * be_rx_compl_get() is not needed for these chips.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709
/* Decode a v0 RX completion entry (used when adapter->be3_native is not
 * set, see be_rx_compl_get()) into the chip-independent be_rx_compl_info.
 * The vlan fields are extracted only when the vtp (vlan-tagged) bit is
 * set; ip_frag is extracted so the caller can ignore the L4 checksum
 * verdict on IP fragments.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1741
1742static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1743{
1744 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1745 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1746 struct be_adapter *adapter = rxo->adapter;
1747
1748 /* For checking the valid bit it is Ok to use either definition as the
1749 * valid bit is at the same position in both v0 and v1 Rx compl */
1750 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751 return NULL;
1752
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001753 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001754 be_dws_le_to_cpu(compl, sizeof(*compl));
1755
1756 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001757 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001758 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001759 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001760
Somnath Koture38b1702013-05-29 22:55:56 +00001761 if (rxcp->ip_frag)
1762 rxcp->l4_csum = 0;
1763
Sathya Perla15d72182011-03-21 20:49:26 +00001764 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301765 /* In QNQ modes, if qnq bit is not set, then the packet was
1766 * tagged only with the transparent outer vlan-tag and must
1767 * not be treated as a vlan packet by host
1768 */
1769 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
Sathya Perla15d72182011-03-21 20:49:26 +00001770 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001771
Sathya Perla15d72182011-03-21 20:49:26 +00001772 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001773 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001774
Somnath Kotur939cf302011-08-18 21:51:49 -07001775 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001776 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001777 rxcp->vlanf = 0;
1778 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001779
1780 /* As the compl has been parsed, reset it; we wont touch it again */
1781 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001782
Sathya Perla3abcded2010-10-03 22:12:27 -07001783 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784 return rxcp;
1785}
1786
Eric Dumazet1829b082011-03-01 05:48:12 +00001787static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001788{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001789 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001790
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001791 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001792 gfp |= __GFP_COMP;
1793 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001794}
1795
1796/*
1797 * Allocate a page, split it to fragments of size rx_frag_size and post as
1798 * receive buffers to BE
1799 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001800static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001801{
Sathya Perla3abcded2010-10-03 22:12:27 -07001802 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001803 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001804 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001805 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01001806 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001807 struct be_eth_rx_d *rxd;
1808 u64 page_dmaaddr = 0, frag_dmaaddr;
1809 u32 posted, page_offset = 0;
1810
Sathya Perla3abcded2010-10-03 22:12:27 -07001811 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1813 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001814 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001815 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001816 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001817 break;
1818 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01001819 page_dmaaddr = dma_map_page(dev, pagep, 0,
1820 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001821 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01001822 if (dma_mapping_error(dev, page_dmaaddr)) {
1823 put_page(pagep);
1824 pagep = NULL;
1825 rx_stats(rxo)->rx_post_fail++;
1826 break;
1827 }
Sathya Perlae50287b2014-03-04 12:14:38 +05301828 page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001829 } else {
1830 get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05301831 page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832 }
Sathya Perlae50287b2014-03-04 12:14:38 +05301833 page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001834 page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001835
1836 rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05301837 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1839 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001840
1841 /* Any space left in the current big page for another frag? */
1842 if ((page_offset + rx_frag_size + rx_frag_size) >
1843 adapter->big_page_size) {
1844 pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05301845 page_info->last_frag = true;
1846 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1847 } else {
1848 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001850
1851 prev_page_info = page_info;
1852 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001853 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854 }
Sathya Perlae50287b2014-03-04 12:14:38 +05301855
1856 /* Mark the last frag of a page when we break out of the above loop
1857 * with no more slots available in the RXQ
1858 */
1859 if (pagep) {
1860 prev_page_info->last_frag = true;
1861 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1862 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863
1864 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001865 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301866 if (rxo->rx_post_starved)
1867 rxo->rx_post_starved = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001868 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001869 } else if (atomic_read(&rxq->used) == 0) {
1870 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001871 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001872 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001873}
1874
/* Fetch the next valid TX completion entry from the TX CQ, or NULL if none
 * is pending. A consumed entry has its 'valid' dword cleared so it is not
 * processed twice, and the CQ tail is advanced past it.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	/* HW sets the 'valid' dword once the entry contents are complete */
	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Prevent reads of the entry body from being reordered before the
	 * valid-bit check above
	 */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear 'valid' so the next pass sees this entry as consumed */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1890
/* Unmap and free the skb whose wrbs in the TX queue end at 'last_index'.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can subtract them from txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was stashed at the slot of its header wrb (current tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first frag wrb also maps the skb's linear (header)
		 * area, if any; unmap that exactly once
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1922
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001923/* Return the number of events in the event queue */
1924static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001925{
1926 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001927 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001928
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001929 do {
1930 eqe = queue_tail_node(&eqo->q);
1931 if (eqe->evt == 0)
1932 break;
1933
1934 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001935 eqe->evt = 0;
1936 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001937 queue_tail_inc(&eqo->q);
1938 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001939
1940 return num;
1941}
1942
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001943/* Leaves the EQ is disarmed state */
1944static void be_eq_clean(struct be_eq_obj *eqo)
1945{
1946 int num = events_get(eqo);
1947
1948 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1949}
1950
/* Drain an RX object's CQ and free all RX buffers still posted on its RXQ.
 * Called on the shutdown path; leaves the CQ unarmed and the RXQ empty with
 * head/tail reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms of polling, or at once if the
			 * HW is already known to be in an error state
			 */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1999
/* Drain TX completions on all TX queues at shutdown, then forcibly reclaim
 * any posted wrbs whose completions never arrived so their skbs and DMA
 * mappings are not leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* cmpl/num_wrbs are per-txq: ack and credit back
			 * before moving to the next queue
			 */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute the wrb span of this skb from its size,
			 * since no completion told us where it ends
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2058
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002059static void be_evt_queues_destroy(struct be_adapter *adapter)
2060{
2061 struct be_eq_obj *eqo;
2062 int i;
2063
2064 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002065 if (eqo->q.created) {
2066 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002067 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302068 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302069 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002070 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002071 be_queue_free(adapter, &eqo->q);
2072 }
2073}
2074
/* Allocate and create all event queues (one per interrupt vector, capped by
 * the configured queue count), registering a NAPI context for each.
 * Returns 0 on success or the first failing command's status.
 * NOTE(review): on a mid-loop failure, already-created EQs/NAPI contexts are
 * left in place — presumably the caller unwinds via be_evt_queues_destroy();
 * confirm against the caller.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		/* allocate host memory for the EQ, then ask FW to create it */
		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2108
Sathya Perla5fb379e2009-06-18 00:02:59 +00002109static void be_mcc_queues_destroy(struct be_adapter *adapter)
2110{
2111 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002112
Sathya Perla8788fdc2009-07-27 22:52:03 +00002113 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002114 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002115 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002116 be_queue_free(adapter, q);
2117
Sathya Perla8788fdc2009-07-27 22:52:03 +00002118 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002119 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002120 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002121 be_queue_free(adapter, q);
2122}
2123
2124/* Must be called only after TX qs are created as MCC shares TX EQ */
2125static int be_mcc_queues_create(struct be_adapter *adapter)
2126{
2127 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002128
Sathya Perla8788fdc2009-07-27 22:52:03 +00002129 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002130 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00002131 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002132 goto err;
2133
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002134 /* Use the default EQ for MCC completions */
2135 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002136 goto mcc_cq_free;
2137
Sathya Perla8788fdc2009-07-27 22:52:03 +00002138 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002139 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2140 goto mcc_cq_destroy;
2141
Sathya Perla8788fdc2009-07-27 22:52:03 +00002142 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002143 goto mcc_q_free;
2144
2145 return 0;
2146
2147mcc_q_free:
2148 be_queue_free(adapter, q);
2149mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002150 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002151mcc_cq_free:
2152 be_queue_free(adapter, cq);
2153err:
2154 return -1;
2155}
2156
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002157static void be_tx_queues_destroy(struct be_adapter *adapter)
2158{
2159 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002160 struct be_tx_obj *txo;
2161 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002162
Sathya Perla3c8def92011-06-12 20:01:58 +00002163 for_all_tx_queues(adapter, txo, i) {
2164 q = &txo->q;
2165 if (q->created)
2166 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2167 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002168
Sathya Perla3c8def92011-06-12 20:01:58 +00002169 q = &txo->cq;
2170 if (q->created)
2171 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2172 be_queue_free(adapter, q);
2173 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002174}
2175
/* Allocate and create all TX queues and their completion queues.
 * The TX queue count is capped by the number of event queues and by the
 * adapter's max. Returns 0 on success or the first failing status; partial
 * state is left for the caller to unwind (see be_tx_queues_destroy).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2216
2217static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002218{
2219 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002220 struct be_rx_obj *rxo;
2221 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002222
Sathya Perla3abcded2010-10-03 22:12:27 -07002223 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002224 q = &rxo->cq;
2225 if (q->created)
2226 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2227 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002229}
2230
/* Decide the RX queue count and create a completion queue for each RX
 * object. Returns 0 on success or the first failing status; partial state
 * is left for the caller to unwind (see be_rx_cqs_destroy).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs may share EQs when there are more CQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2267
/* INTx interrupt handler (shared line). Schedules NAPI to do the real work
 * and tracks spurious interrupts so a flaky line is eventually reported to
 * the kernel as unhandled.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2299
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002300static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002301{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002302 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002303
Sathya Perla0b545a62012-11-23 00:27:18 +00002304 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2305 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002306 return IRQ_HANDLED;
2307}
2308
Sathya Perla2e588f82011-03-11 02:49:26 +00002309static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310{
Somnath Koture38b1702013-05-29 22:55:56 +00002311 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002312}
2313
/* Process up to 'budget' RX completions on one RX object. 'polling'
 * distinguishes NAPI from busy-poll context (no GRO in busy-poll).
 * Returns the number of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* ack the consumed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2369
/* Process up to 'budget' TX completions on one TX object, reclaiming wrbs
 * and waking the netdev subqueue 'idx' if it stalled for lack of wrbs.
 * Returns true when the TX CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002402
/* NAPI poll handler for one EQ: services all TXQs and RXQs mapped to this
 * EQ, plus MCC completions on the EQ that owns the MCC queue. Re-arms the
 * EQ only when all work fit within the budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* an undrained TX CQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll holds the lock; report budget to stay scheduled */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2447
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll entry point: process a small batch (4) of RX completions on
 * the first non-idle RX ring of this EQ. Returns LL_FLUSH_BUSY when the
 * NAPI/busy-poll lock is already held, else the number of pkts processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, pkts = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		pkts = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (pkts)
			break;
	}

	be_unlock_busy_poll(eqo);
	return pkts;
}
#endif
2469
/* Check the adapter for hardware/firmware error conditions: SLIPORT status
 * registers on Lancer, Unrecoverable Error (UE) config-space registers on
 * BE/Skyhawk. Logs the decoded error bits and drops the carrier when a real
 * error is found. No-op once a HW error has already been latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* mask off bits the platform declares as don't-care */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2545
Sathya Perla8d56ff12009-11-22 22:02:26 +00002546static void be_msix_disable(struct be_adapter *adapter)
2547{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002548 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002549 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002550 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302551 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002552 }
2553}
2554
/* Enable MSI-x on the device and split the granted vectors between the
 * NIC and (optionally) the RoCE function.
 *
 * Returns 0 on success. On failure, returns 0 for a PF (so probe can fall
 * back to INTx) and the pci_enable_msix_range() error for a VF, since VFs
 * cannot use INTx at all.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors; the NIC keeps the rest */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2598
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002599static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002600 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002601{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302602 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002603}
2604
2605static int be_msix_register(struct be_adapter *adapter)
2606{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002607 struct net_device *netdev = adapter->netdev;
2608 struct be_eq_obj *eqo;
2609 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002610
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002611 for_all_evt_queues(adapter, eqo, i) {
2612 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2613 vec = be_msix_vec_get(adapter, eqo);
2614 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002615 if (status)
2616 goto err_msix;
2617 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002618
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002619 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002620err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002621 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2622 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2623 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2624 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002625 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002626 return status;
2627}
2628
/* Register the adapter's interrupt handler(s): MSI-x when enabled, with a
 * fall-back to shared INTx on a PF if MSI-x registration fails.
 * Sets adapter->isr_registered on success so teardown knows what to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2656
2657static void be_irq_unregister(struct be_adapter *adapter)
2658{
2659 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002660 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002661 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002662
2663 if (!adapter->isr_registered)
2664 return;
2665
2666 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002667 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002668 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002669 goto done;
2670 }
2671
2672 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002673 for_all_evt_queues(adapter, eqo, i)
2674 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002675
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002676done:
2677 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002678}
2679
/* Destroy all RX queues: tell the FW to tear down each created RXQ, drain
 * its completion queue (the FW flushes pending completions on destroy),
 * and then free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* reap completions flushed by the FW on destroy */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2695
/* ndo_stop handler: quiesce the interface in a strict order — RoCE first,
 * then NAPI/busy-poll, async MCC events, TX drain, RX queues, the extra
 * unicast MAC filters, and finally the event queues and IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Remove the additional uc-macs; slot 0 (primary MAC) is kept */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure no handler is still running on another CPU */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2739
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002740static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002741{
2742 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002743 int rc, i, j;
2744 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002745
2746 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002747 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2748 sizeof(struct be_eth_rx_d));
2749 if (rc)
2750 return rc;
2751 }
2752
2753 /* The FW would like the default RXQ to be created first */
2754 rxo = default_rxo(adapter);
2755 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2756 adapter->if_handle, false, &rxo->rss_id);
2757 if (rc)
2758 return rc;
2759
2760 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002761 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002762 rx_frag_size, adapter->if_handle,
2763 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002764 if (rc)
2765 return rc;
2766 }
2767
2768 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002769 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2770 for_all_rss_queues(adapter, rxo, i) {
2771 if ((j + i) >= 128)
2772 break;
2773 rsstable[j + i] = rxo->rss_id;
2774 }
2775 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002776 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2777 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2778
2779 if (!BEx_chip(adapter))
2780 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2781 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302782 } else {
2783 /* Disable RSS, if only default RX Q is created */
2784 adapter->rss_flags = RSS_ENABLE_NONE;
2785 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002786
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302787 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2788 128);
2789 if (rc) {
2790 adapter->rss_flags = RSS_ENABLE_NONE;
2791 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002792 }
2793
2794 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002795 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002796 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002797 return 0;
2798}
2799
/* ndo_open handler: create RX queues, register IRQs, arm all CQs/EQs,
 * enable NAPI/busy-poll and async MCC events, then start the TX queues
 * and notify RoCE. Any failure unwinds through be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* report the current link state; best-effort */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2843
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002844static int be_setup_wol(struct be_adapter *adapter, bool enable)
2845{
2846 struct be_dma_mem cmd;
2847 int status = 0;
2848 u8 mac[ETH_ALEN];
2849
2850 memset(mac, 0, ETH_ALEN);
2851
2852 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002853 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2854 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002855 if (cmd.va == NULL)
2856 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002857
2858 if (enable) {
2859 status = pci_write_config_dword(adapter->pdev,
2860 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2861 if (status) {
2862 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002863 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002864 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2865 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002866 return status;
2867 }
2868 status = be_cmd_enable_magic_wol(adapter,
2869 adapter->netdev->dev_addr, &cmd);
2870 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2871 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2872 } else {
2873 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2874 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2875 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2876 }
2877
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002878 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002879 return status;
2880}
2881
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 programs a pmac filter; newer chips set the MAC
		 * directly on the VF's interface
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	/* NOTE(review): returns only the *last* VF's status; earlier
	 * failures are logged but not propagated.
	 */
	return status;
}
2916
Sathya Perla4c876612013-02-03 20:30:11 +00002917static int be_vfs_mac_query(struct be_adapter *adapter)
2918{
2919 int status, vf;
2920 u8 mac[ETH_ALEN];
2921 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002922
2923 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302924 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2925 mac, vf_cfg->if_handle,
2926 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002927 if (status)
2928 return status;
2929 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2930 }
2931 return 0;
2932}
2933
/* Tear down SR-IOV VF state. If VFs are still assigned to guest VMs,
 * SR-IOV is left enabled (disabling it would crash the guests) and only
 * the host-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* undo the MAC programming done at setup time */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2961
/* Destroy all adapter queues in reverse order of creation:
 * MCC, then RX CQs, TX queues, and finally the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2969
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302970static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002971{
Sathya Perla191eb752012-02-23 18:50:13 +00002972 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2973 cancel_delayed_work_sync(&adapter->work);
2974 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2975 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302976}
2977
Somnath Koturb05004a2013-12-05 12:08:16 +05302978static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302979{
2980 int i;
2981
Somnath Koturb05004a2013-12-05 12:08:16 +05302982 if (adapter->pmac_id) {
2983 for (i = 0; i < (adapter->uc_macs + 1); i++)
2984 be_cmd_pmac_del(adapter, adapter->if_handle,
2985 adapter->pmac_id[i], 0);
2986 adapter->uc_macs = 0;
2987
2988 kfree(adapter->pmac_id);
2989 adapter->pmac_id = NULL;
2990 }
2991}
2992
/* Full teardown of the adapter's software/FW state, in dependency order:
 * stop the worker, clear VFs, remove MAC filters, destroy the interface,
 * destroy all queues, and finally release the MSI-x vectors.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
3010
Sathya Perla4c876612013-02-03 20:30:11 +00003011static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003012{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303013 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003014 struct be_vf_cfg *vf_cfg;
3015 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003016 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003017
Sathya Perla4c876612013-02-03 20:30:11 +00003018 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3019 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003020
Sathya Perla4c876612013-02-03 20:30:11 +00003021 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303022 if (!BE3_chip(adapter)) {
3023 status = be_cmd_get_profile_config(adapter, &res,
3024 vf + 1);
3025 if (!status)
3026 cap_flags = res.if_cap_flags;
3027 }
Sathya Perla4c876612013-02-03 20:30:11 +00003028
3029 /* If a FW profile exists, then cap_flags are updated */
3030 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3031 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3032 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3033 &vf_cfg->if_handle, vf + 1);
3034 if (status)
3035 goto err;
3036 }
3037err:
3038 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003039}
3040
Sathya Perla39f1d942012-05-08 19:41:24 +00003041static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003042{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003043 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003044 int vf;
3045
Sathya Perla39f1d942012-05-08 19:41:24 +00003046 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3047 GFP_KERNEL);
3048 if (!adapter->vf_cfg)
3049 return -ENOMEM;
3050
Sathya Perla11ac75e2011-12-13 00:58:50 +00003051 for_all_vfs(adapter, vf_cfg, vf) {
3052 vf_cfg->if_handle = -1;
3053 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003054 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003055 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003056}
3057
/* Bring up SR-IOV VFs. Handles two paths: VFs already enabled before this
 * driver loaded (old_vfs != 0; reuse and query their state) and a fresh
 * enable (create interfaces, assign MACs, then call pci_enable_sriov()).
 * Returns 0 on success; on failure all VF state is torn down.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs survive from a previous driver instance; the num_vfs
		 * module parameter cannot change their count now
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* interfaces: query existing ones, or create fresh */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* MACs: query existing ones, or program new ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* cache the link speed as the VF's tx_rate; best-effort */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3153
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303154/* Converting function_mode bits on BE3 to SH mc_type enums */
3155
3156static u8 be_convert_mc_type(u32 function_mode)
3157{
3158 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3159 return vNIC1;
3160 else if (function_mode & FLEX10_MODE)
3161 return FLEX10;
3162 else if (function_mode & VNIC_MODE)
3163 return vNIC2;
3164 else if (function_mode & UMC_ENABLED)
3165 return UMC;
3166 else
3167 return MC_NONE;
3168}
3169
/* On BE2/BE3 FW does not suggest the supported limits.
 * Derive queue/MAC/VLAN limits from the chip type, SR-IOV intent and
 * multi-channel mode instead, and record them in *res.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs = 0;

	if (be_physfn(adapter) && BE3_chip(adapter)) {
		be_cmd_get_profile_config(adapter, res, 0);
		/* Some old versions of BE3 FW don't report max_vfs value */
		if (res->max_vfs == 0) {
			max_vfs = pci_sriov_get_totalvfs(pdev);
			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		}
		use_sriov = res->max_vfs && sriov_want(adapter);
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for an RSS-capable PF not using SR-IOV */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				  BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 accounts for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
				  BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3239
Sathya Perla30128032011-11-10 19:17:57 +00003240static void be_setup_init(struct be_adapter *adapter)
3241{
3242 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003243 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003244 adapter->if_handle = -1;
3245 adapter->be3_native = false;
3246 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003247 if (be_physfn(adapter))
3248 adapter->cmd_privileges = MAX_PRIVILEGES;
3249 else
3250 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003251}
3252
/* Populate adapter->res with this function's resource limits: derived
 * heuristically on BE2/BE3, or queried from the FW on Lancer/Skyhawk.
 * Returns 0 on success or a FW command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3296
Sathya Perla39f1d942012-05-08 19:41:24 +00003297/* Routine to query per function resource limits */
3298static int be_get_config(struct be_adapter *adapter)
3299{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303300 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003301 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003302
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003303 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3304 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003305 &adapter->function_caps,
3306 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003307 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303308 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003309
Vasundhara Volam542963b2014-01-15 13:23:33 +05303310 if (be_physfn(adapter)) {
3311 status = be_cmd_get_active_profile(adapter, &profile_id);
3312 if (!status)
3313 dev_info(&adapter->pdev->dev,
3314 "Using profile 0x%x\n", profile_id);
3315 }
3316
Sathya Perla92bf14a2013-08-27 16:57:32 +05303317 status = be_get_resources(adapter);
3318 if (status)
3319 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003320
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05303321 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3322 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303323 if (!adapter->pmac_id)
3324 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003325
Sathya Perla92bf14a2013-08-27 16:57:32 +05303326 /* Sanitize cfg_num_qs based on HW and platform limits */
3327 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3328
3329 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003330}
3331
Sathya Perla95046b92013-07-23 15:25:02 +05303332static int be_mac_setup(struct be_adapter *adapter)
3333{
3334 u8 mac[ETH_ALEN];
3335 int status;
3336
3337 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3338 status = be_cmd_get_perm_mac(adapter, mac);
3339 if (status)
3340 return status;
3341
3342 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3343 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3344 } else {
3345 /* Maybe the HW was reset; dev_addr must be re-programmed */
3346 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3347 }
3348
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003349 /* For BE3-R VFs, the PF programs the initial MAC address */
3350 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3351 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3352 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303353 return 0;
3354}
3355
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303356static void be_schedule_worker(struct be_adapter *adapter)
3357{
3358 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3359 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3360}
3361
Sathya Perla77071332013-08-27 16:57:34 +05303362static int be_setup_queues(struct be_adapter *adapter)
3363{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303364 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303365 int status;
3366
3367 status = be_evt_queues_create(adapter);
3368 if (status)
3369 goto err;
3370
3371 status = be_tx_qs_create(adapter);
3372 if (status)
3373 goto err;
3374
3375 status = be_rx_cqs_create(adapter);
3376 if (status)
3377 goto err;
3378
3379 status = be_mcc_queues_create(adapter);
3380 if (status)
3381 goto err;
3382
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303383 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3384 if (status)
3385 goto err;
3386
3387 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3388 if (status)
3389 goto err;
3390
Sathya Perla77071332013-08-27 16:57:34 +05303391 return 0;
3392err:
3393 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3394 return status;
3395}
3396
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303397int be_update_queues(struct be_adapter *adapter)
3398{
3399 struct net_device *netdev = adapter->netdev;
3400 int status;
3401
3402 if (netif_running(netdev))
3403 be_close(netdev);
3404
3405 be_cancel_worker(adapter);
3406
3407 /* If any vectors have been shared with RoCE we cannot re-program
3408 * the MSIx table.
3409 */
3410 if (!adapter->num_msix_roce_vec)
3411 be_msix_disable(adapter);
3412
3413 be_clear_queues(adapter);
3414
3415 if (!msix_enabled(adapter)) {
3416 status = be_msix_enable(adapter);
3417 if (status)
3418 return status;
3419 }
3420
3421 status = be_setup_queues(adapter);
3422 if (status)
3423 return status;
3424
3425 be_schedule_worker(adapter);
3426
3427 if (netif_running(netdev))
3428 status = be_open(netdev);
3429
3430 return status;
3431}
3432
Sathya Perla5fb379e2009-06-18 00:02:59 +00003433static int be_setup(struct be_adapter *adapter)
3434{
Sathya Perla39f1d942012-05-08 19:41:24 +00003435 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303436 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003437 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003438
Sathya Perla30128032011-11-10 19:17:57 +00003439 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003440
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003441 if (!lancer_chip(adapter))
3442 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003443
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003444 status = be_get_config(adapter);
3445 if (status)
3446 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003447
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003448 status = be_msix_enable(adapter);
3449 if (status)
3450 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003451
Sathya Perla77071332013-08-27 16:57:34 +05303452 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3453 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3454 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3455 en_flags |= BE_IF_FLAGS_RSS;
3456 en_flags = en_flags & be_if_cap_flags(adapter);
3457 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3458 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003459 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003460 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003461
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303462 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3463 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303464 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303465 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003466 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003467 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003468
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003469 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003470
Sathya Perla95046b92013-07-23 15:25:02 +05303471 status = be_mac_setup(adapter);
3472 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003473 goto err;
3474
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003475 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003476
Somnath Koture9e2a902013-10-24 14:37:53 +05303477 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3478 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3479 adapter->fw_ver);
3480 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3481 }
3482
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003483 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003484 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003485
3486 be_set_rx_mode(adapter->netdev);
3487
Suresh Reddy76a9e082014-01-15 13:23:40 +05303488 be_cmd_get_acpi_wol_cap(adapter);
3489
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003490 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003491
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003492 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3493 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003494 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003495
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303496 if (be_physfn(adapter))
3497 be_cmd_set_logical_link_config(adapter,
3498 IFLA_VF_LINK_STATE_AUTO, 0);
3499
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303500 if (sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303501 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003502 be_vf_setup(adapter);
3503 else
3504 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003505 }
3506
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003507 status = be_cmd_get_phy_info(adapter);
3508 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003509 adapter->phy.fc_autoneg = 1;
3510
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303511 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003512 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003513err:
3514 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003515 return status;
3516}
3517
Ivan Vecera66268732011-12-08 01:31:21 +00003518#ifdef CONFIG_NET_POLL_CONTROLLER
3519static void be_netpoll(struct net_device *netdev)
3520{
3521 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003522 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003523 int i;
3524
Sathya Perlae49cc342012-11-27 19:50:02 +00003525 for_all_evt_queues(adapter, eqo, i) {
3526 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3527 napi_schedule(&eqo->napi);
3528 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003529
3530 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003531}
3532#endif
3533
Ajit Khaparde84517482009-09-04 03:12:16 +00003534#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003535static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003536
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003537static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003538 const u8 *p, u32 img_start, int image_size,
3539 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003540{
3541 u32 crc_offset;
3542 u8 flashed_crc[4];
3543 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003544
3545 crc_offset = hdr_size + img_start + image_size - 4;
3546
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003547 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003548
3549 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003550 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003551 if (status) {
3552 dev_err(&adapter->pdev->dev,
3553 "could not get crc from flash, not flashing redboot\n");
3554 return false;
3555 }
3556
3557 /*update redboot only if crc does not match*/
3558 if (!memcmp(flashed_crc, p, 4))
3559 return false;
3560 else
3561 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003562}
3563
Sathya Perla306f1342011-08-02 19:57:45 +00003564static bool phy_flashing_required(struct be_adapter *adapter)
3565{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003566 return (adapter->phy.phy_type == TN_8022 &&
3567 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003568}
3569
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003570static bool is_comp_in_ufi(struct be_adapter *adapter,
3571 struct flash_section_info *fsec, int type)
3572{
3573 int i = 0, img_type = 0;
3574 struct flash_section_info_g2 *fsec_g2 = NULL;
3575
Sathya Perlaca34fe32012-11-06 17:48:56 +00003576 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003577 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3578
3579 for (i = 0; i < MAX_FLASH_COMP; i++) {
3580 if (fsec_g2)
3581 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3582 else
3583 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3584
3585 if (img_type == type)
3586 return true;
3587 }
3588 return false;
3589
3590}
3591
Jingoo Han4188e7d2013-08-05 18:02:02 +09003592static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003593 int header_size,
3594 const struct firmware *fw)
3595{
3596 struct flash_section_info *fsec = NULL;
3597 const u8 *p = fw->data;
3598
3599 p += header_size;
3600 while (p < (fw->data + fw->size)) {
3601 fsec = (struct flash_section_info *)p;
3602 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3603 return fsec;
3604 p += 32;
3605 }
3606 return NULL;
3607}
3608
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003609static int be_flash(struct be_adapter *adapter, const u8 *img,
3610 struct be_dma_mem *flash_cmd, int optype, int img_size)
3611{
3612 u32 total_bytes = 0, flash_op, num_bytes = 0;
3613 int status = 0;
3614 struct be_cmd_write_flashrom *req = flash_cmd->va;
3615
3616 total_bytes = img_size;
3617 while (total_bytes) {
3618 num_bytes = min_t(u32, 32*1024, total_bytes);
3619
3620 total_bytes -= num_bytes;
3621
3622 if (!total_bytes) {
3623 if (optype == OPTYPE_PHY_FW)
3624 flash_op = FLASHROM_OPER_PHY_FLASH;
3625 else
3626 flash_op = FLASHROM_OPER_FLASH;
3627 } else {
3628 if (optype == OPTYPE_PHY_FW)
3629 flash_op = FLASHROM_OPER_PHY_SAVE;
3630 else
3631 flash_op = FLASHROM_OPER_SAVE;
3632 }
3633
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003634 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003635 img += num_bytes;
3636 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3637 flash_op, num_bytes);
3638 if (status) {
3639 if (status == ILLEGAL_IOCTL_REQ &&
3640 optype == OPTYPE_PHY_FW)
3641 break;
3642 dev_err(&adapter->pdev->dev,
3643 "cmd to write to flash rom failed.\n");
3644 return status;
3645 }
3646 }
3647 return 0;
3648}
3649
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003650/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003651static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003652 const struct firmware *fw,
3653 struct be_dma_mem *flash_cmd,
3654 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003655
Ajit Khaparde84517482009-09-04 03:12:16 +00003656{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003657 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003658 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003659 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003660 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003661 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003662 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003663
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003664 struct flash_comp gen3_flash_types[] = {
3665 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3666 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3667 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3668 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3669 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3670 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3671 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3672 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3673 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3674 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3675 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3676 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3677 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3678 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3679 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3680 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3681 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3682 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3683 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3684 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003685 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003686
3687 struct flash_comp gen2_flash_types[] = {
3688 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3689 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3690 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3691 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3692 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3693 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3694 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3695 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3696 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3697 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3698 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3699 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3700 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3701 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3702 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3703 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003704 };
3705
Sathya Perlaca34fe32012-11-06 17:48:56 +00003706 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003707 pflashcomp = gen3_flash_types;
3708 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003709 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003710 } else {
3711 pflashcomp = gen2_flash_types;
3712 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003713 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003714 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003715
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003716 /* Get flash section info*/
3717 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3718 if (!fsec) {
3719 dev_err(&adapter->pdev->dev,
3720 "Invalid Cookie. UFI corrupted ?\n");
3721 return -1;
3722 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003723 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003724 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003725 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003726
3727 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3728 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3729 continue;
3730
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003731 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3732 !phy_flashing_required(adapter))
3733 continue;
3734
3735 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3736 redboot = be_flash_redboot(adapter, fw->data,
3737 pflashcomp[i].offset, pflashcomp[i].size,
3738 filehdr_size + img_hdrs_size);
3739 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003740 continue;
3741 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003742
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003743 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003744 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003745 if (p + pflashcomp[i].size > fw->data + fw->size)
3746 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003747
3748 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3749 pflashcomp[i].size);
3750 if (status) {
3751 dev_err(&adapter->pdev->dev,
3752 "Flashing section type %d failed.\n",
3753 pflashcomp[i].img_type);
3754 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003755 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003756 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003757 return 0;
3758}
3759
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003760static int be_flash_skyhawk(struct be_adapter *adapter,
3761 const struct firmware *fw,
3762 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003763{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003764 int status = 0, i, filehdr_size = 0;
3765 int img_offset, img_size, img_optype, redboot;
3766 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3767 const u8 *p = fw->data;
3768 struct flash_section_info *fsec = NULL;
3769
3770 filehdr_size = sizeof(struct flash_file_hdr_g3);
3771 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3772 if (!fsec) {
3773 dev_err(&adapter->pdev->dev,
3774 "Invalid Cookie. UFI corrupted ?\n");
3775 return -1;
3776 }
3777
3778 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3779 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3780 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3781
3782 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3783 case IMAGE_FIRMWARE_iSCSI:
3784 img_optype = OPTYPE_ISCSI_ACTIVE;
3785 break;
3786 case IMAGE_BOOT_CODE:
3787 img_optype = OPTYPE_REDBOOT;
3788 break;
3789 case IMAGE_OPTION_ROM_ISCSI:
3790 img_optype = OPTYPE_BIOS;
3791 break;
3792 case IMAGE_OPTION_ROM_PXE:
3793 img_optype = OPTYPE_PXE_BIOS;
3794 break;
3795 case IMAGE_OPTION_ROM_FCoE:
3796 img_optype = OPTYPE_FCOE_BIOS;
3797 break;
3798 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3799 img_optype = OPTYPE_ISCSI_BACKUP;
3800 break;
3801 case IMAGE_NCSI:
3802 img_optype = OPTYPE_NCSI_FW;
3803 break;
3804 default:
3805 continue;
3806 }
3807
3808 if (img_optype == OPTYPE_REDBOOT) {
3809 redboot = be_flash_redboot(adapter, fw->data,
3810 img_offset, img_size,
3811 filehdr_size + img_hdrs_size);
3812 if (!redboot)
3813 continue;
3814 }
3815
3816 p = fw->data;
3817 p += filehdr_size + img_offset + img_hdrs_size;
3818 if (p + img_size > fw->data + fw->size)
3819 return -1;
3820
3821 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3822 if (status) {
3823 dev_err(&adapter->pdev->dev,
3824 "Flashing section type %d failed.\n",
3825 fsec->fsec_entry[i].type);
3826 return status;
3827 }
3828 }
3829 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003830}
3831
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003832static int lancer_fw_download(struct be_adapter *adapter,
3833 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003834{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003835#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3836#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3837 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003838 const u8 *data_ptr = NULL;
3839 u8 *dest_image_ptr = NULL;
3840 size_t image_size = 0;
3841 u32 chunk_size = 0;
3842 u32 data_written = 0;
3843 u32 offset = 0;
3844 int status = 0;
3845 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003846 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003847
3848 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3849 dev_err(&adapter->pdev->dev,
3850 "FW Image not properly aligned. "
3851 "Length must be 4 byte aligned.\n");
3852 status = -EINVAL;
3853 goto lancer_fw_exit;
3854 }
3855
3856 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3857 + LANCER_FW_DOWNLOAD_CHUNK;
3858 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003859 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003860 if (!flash_cmd.va) {
3861 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003862 goto lancer_fw_exit;
3863 }
3864
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003865 dest_image_ptr = flash_cmd.va +
3866 sizeof(struct lancer_cmd_req_write_object);
3867 image_size = fw->size;
3868 data_ptr = fw->data;
3869
3870 while (image_size) {
3871 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3872
3873 /* Copy the image chunk content. */
3874 memcpy(dest_image_ptr, data_ptr, chunk_size);
3875
3876 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003877 chunk_size, offset,
3878 LANCER_FW_DOWNLOAD_LOCATION,
3879 &data_written, &change_status,
3880 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003881 if (status)
3882 break;
3883
3884 offset += data_written;
3885 data_ptr += data_written;
3886 image_size -= data_written;
3887 }
3888
3889 if (!status) {
3890 /* Commit the FW written */
3891 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003892 0, offset,
3893 LANCER_FW_DOWNLOAD_LOCATION,
3894 &data_written, &change_status,
3895 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003896 }
3897
3898 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3899 flash_cmd.dma);
3900 if (status) {
3901 dev_err(&adapter->pdev->dev,
3902 "Firmware load error. "
3903 "Status code: 0x%x Additional Status: 0x%x\n",
3904 status, add_status);
3905 goto lancer_fw_exit;
3906 }
3907
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003908 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur4bebb562013-12-05 12:07:55 +05303909 dev_info(&adapter->pdev->dev,
3910 "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00003911 status = lancer_physdev_ctrl(adapter,
3912 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003913 if (status) {
3914 dev_err(&adapter->pdev->dev,
3915 "Adapter busy for FW reset.\n"
3916 "New FW will not be active.\n");
3917 goto lancer_fw_exit;
3918 }
3919 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3920 dev_err(&adapter->pdev->dev,
3921 "System reboot required for new FW"
3922 " to be active\n");
3923 }
3924
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003925 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3926lancer_fw_exit:
3927 return status;
3928}
3929
Sathya Perlaca34fe32012-11-06 17:48:56 +00003930#define UFI_TYPE2 2
3931#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003932#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003933#define UFI_TYPE4 4
3934static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003935 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003936{
3937 if (fhdr == NULL)
3938 goto be_get_ufi_exit;
3939
Sathya Perlaca34fe32012-11-06 17:48:56 +00003940 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3941 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003942 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3943 if (fhdr->asic_type_rev == 0x10)
3944 return UFI_TYPE3R;
3945 else
3946 return UFI_TYPE3;
3947 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003948 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003949
3950be_get_ufi_exit:
3951 dev_err(&adapter->pdev->dev,
3952 "UFI and Interface are not compatible for flashing\n");
3953 return -1;
3954}
3955
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003956static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3957{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003958 struct flash_file_hdr_g3 *fhdr3;
3959 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003960 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003961 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003962 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003963
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003964 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003965 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3966 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003967 if (!flash_cmd.va) {
3968 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003969 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003970 }
3971
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003972 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003973 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003974
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003975 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003976
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003977 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3978 for (i = 0; i < num_imgs; i++) {
3979 img_hdr_ptr = (struct image_hdr *)(fw->data +
3980 (sizeof(struct flash_file_hdr_g3) +
3981 i * sizeof(struct image_hdr)));
3982 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003983 switch (ufi_type) {
3984 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003985 status = be_flash_skyhawk(adapter, fw,
3986 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003987 break;
3988 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003989 status = be_flash_BEx(adapter, fw, &flash_cmd,
3990 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003991 break;
3992 case UFI_TYPE3:
3993 /* Do not flash this ufi on BE3-R cards */
3994 if (adapter->asic_rev < 0x10)
3995 status = be_flash_BEx(adapter, fw,
3996 &flash_cmd,
3997 num_imgs);
3998 else {
3999 status = -1;
4000 dev_err(&adapter->pdev->dev,
4001 "Can't load BE3 UFI on BE3R\n");
4002 }
4003 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004004 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004005 }
4006
Sathya Perlaca34fe32012-11-06 17:48:56 +00004007 if (ufi_type == UFI_TYPE2)
4008 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004009 else if (ufi_type == -1)
4010 status = -1;
4011
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004012 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4013 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004014 if (status) {
4015 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004016 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004017 }
4018
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004019 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004020
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004021be_fw_exit:
4022 return status;
4023}
4024
4025int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4026{
4027 const struct firmware *fw;
4028 int status;
4029
4030 if (!netif_running(adapter->netdev)) {
4031 dev_err(&adapter->pdev->dev,
4032 "Firmware load not allowed (interface is down)\n");
4033 return -1;
4034 }
4035
4036 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4037 if (status)
4038 goto fw_exit;
4039
4040 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4041
4042 if (lancer_chip(adapter))
4043 status = lancer_fw_download(adapter, fw);
4044 else
4045 status = be_fw_download(adapter, fw);
4046
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004047 if (!status)
4048 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4049 adapter->fw_on_flash);
4050
Ajit Khaparde84517482009-09-04 03:12:16 +00004051fw_exit:
4052 release_firmware(fw);
4053 return status;
4054}
4055
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004056static int be_ndo_bridge_setlink(struct net_device *dev,
4057 struct nlmsghdr *nlh)
4058{
4059 struct be_adapter *adapter = netdev_priv(dev);
4060 struct nlattr *attr, *br_spec;
4061 int rem;
4062 int status = 0;
4063 u16 mode = 0;
4064
4065 if (!sriov_enabled(adapter))
4066 return -EOPNOTSUPP;
4067
4068 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4069
4070 nla_for_each_nested(attr, br_spec, rem) {
4071 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4072 continue;
4073
4074 mode = nla_get_u16(attr);
4075 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4076 return -EINVAL;
4077
4078 status = be_cmd_set_hsw_config(adapter, 0, 0,
4079 adapter->if_handle,
4080 mode == BRIDGE_MODE_VEPA ?
4081 PORT_FWD_TYPE_VEPA :
4082 PORT_FWD_TYPE_VEB);
4083 if (status)
4084 goto err;
4085
4086 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4087 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4088
4089 return status;
4090 }
4091err:
4092 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4093 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4094
4095 return status;
4096}
4097
4098static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4099 struct net_device *dev,
4100 u32 filter_mask)
4101{
4102 struct be_adapter *adapter = netdev_priv(dev);
4103 int status = 0;
4104 u8 hsw_mode;
4105
4106 if (!sriov_enabled(adapter))
4107 return 0;
4108
4109 /* BE and Lancer chips support VEB mode only */
4110 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4111 hsw_mode = PORT_FWD_TYPE_VEB;
4112 } else {
4113 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4114 adapter->if_handle, &hsw_mode);
4115 if (status)
4116 return 0;
4117 }
4118
4119 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4120 hsw_mode == PORT_FWD_TYPE_VEPA ?
4121 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4122}
4123
/* net_device_ops table wiring the stack's entry points to this driver's
 * handlers; installed on the netdev by be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF management hooks */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	/* e-switch (VEB/VEPA) mode control */
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
4149
/* Initialize netdev feature flags, ops and ethtool ops before
 * register_netdev(). Called once from be_probe().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* user-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* currently-enabled features: all of hw_features plus VLAN RX
	 * stripping/filtering, which are always on
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4176
4177static void be_unmap_pci_bars(struct be_adapter *adapter)
4178{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004179 if (adapter->csr)
4180 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004181 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004182 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004183}
4184
/* Return the PCI BAR number holding the doorbell region: BAR 0 on Lancer
 * and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4192
4193static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004194{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004195 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004196 adapter->roce_db.size = 4096;
4197 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4198 db_bar(adapter));
4199 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4200 db_bar(adapter));
4201 }
Parav Pandit045508a2012-03-26 14:27:13 +00004202 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004203}
4204
/* ioremap the adapter's PCI BARs: the CSR BAR (BE-class PFs only), the
 * doorbell BAR, and (on Skyhawk) the RoCE doorbell window.
 * Returns 0 on success or -ENOMEM, unwinding any partial mappings.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* CSR space exists only on BE2/BE3 physical functions (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* releases the CSR mapping made above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4227
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004228static void be_ctrl_cleanup(struct be_adapter *adapter)
4229{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004230 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004231
4232 be_unmap_pci_bars(adapter);
4233
4234 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004235 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4236 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004237
Sathya Perla5b8821b2011-08-02 19:57:44 +00004238 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004239 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004240 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4241 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004242}
4243
/* One-time control-path setup: read the SLI interface register, map PCI
 * BARs, allocate the FW mailbox and rx-filter DMA buffers, and init the
 * mailbox/MCC locks. Returns 0 or a negative errno; on failure everything
 * acquired so far is released via the goto-unwind chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* identify the SLI family and whether this is a VF */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* snapshot config space for later pci_restore_state() on resume/EEH */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4302
4303static void be_stats_cleanup(struct be_adapter *adapter)
4304{
Sathya Perla3abcded2010-10-03 22:12:27 -07004305 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004306
4307 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004308 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4309 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004310}
4311
4312static int be_stats_init(struct be_adapter *adapter)
4313{
Sathya Perla3abcded2010-10-03 22:12:27 -07004314 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004315
Sathya Perlaca34fe32012-11-06 17:48:56 +00004316 if (lancer_chip(adapter))
4317 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4318 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004319 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004320 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004321 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004322 else
4323 /* ALL non-BE ASICs */
4324 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004325
Joe Perchesede23fa82013-08-26 22:45:23 -07004326 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4327 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004328 if (cmd->va == NULL)
4329 return -1;
4330 return 0;
4331}
4332
/* PCI remove handler: tear down in the reverse order of be_probe().
 * The ordering is significant: RoCE and interrupts first, then the
 * recovery worker, netdev unregistration, ring/FW teardown, and finally
 * PCI resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the recovery worker before pulling resources out from
	 * under it
	 */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4363
/* Fetch initial, probe-time configuration from FW: controller attributes,
 * die-temperature polling interval, FW log level (BE-class only) and the
 * default queue count. Returns 0 or the errno from the attributes query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* derive the netif message level from the FW's log level */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4384
/* Recover a Lancer function after an adapter error: wait for the chip to
 * report ready, tear down the function, clear the recorded error state,
 * and bring the function (and the interface, if it was up) back.
 * Returns 0 on success; -EAGAIN from FW means resources are still being
 * provisioned and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): success is logged at error level; dev_info would
	 * seem more appropriate - confirm whether this is intentional
	 */
	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4421
/* Periodic (1s) worker that watches for adapter errors and, on Lancer,
 * attempts automatic function recovery. Detaches the netdev around the
 * recovery attempt and re-attaches it on success.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* rtnl_lock serializes the detach against concurrent
		 * ndo operations
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4448
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down; otherwise kicks off a stats refresh, polls die
 * temperature (PF only, every be_get_temp_freq ticks), replenishes
 * starved RX queues and updates EQ delay (interrupt moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BHs disabled */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't issue a new stats cmd while a previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4491
Sathya Perla257a3fe2013-06-14 15:54:51 +05304492/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004493static bool be_reset_required(struct be_adapter *adapter)
4494{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304495 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004496}
4497
Sathya Perlad3791422012-09-28 04:39:44 +00004498static char *mc_name(struct be_adapter *adapter)
4499{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304500 char *str = ""; /* default */
4501
4502 switch (adapter->mc_type) {
4503 case UMC:
4504 str = "UMC";
4505 break;
4506 case FLEX10:
4507 str = "FLEX10";
4508 break;
4509 case vNIC1:
4510 str = "vNIC-1";
4511 break;
4512 case nPAR:
4513 str = "nPAR";
4514 break;
4515 case UFP:
4516 str = "UFP";
4517 break;
4518 case vNIC2:
4519 str = "vNIC-2";
4520 break;
4521 default:
4522 str = "";
4523 }
4524
4525 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004526}
4527
/* Label this function as "PF" or "VF" for the probe-time banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4532
/* PCI probe handler: bring up one adapter function end-to-end.
 * Sequence: enable PCI device and regions -> allocate netdev -> set DMA
 * mask -> map BARs and init control path -> sync with FW and optionally
 * FLR -> init stats and config -> create rings (be_setup) -> register
 * netdev -> add RoCE device and start the recovery worker.
 * Each failure unwinds exactly what preceded it via the goto chain.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		/* best-effort: AER may be unsupported on this platform */
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4654
/* Legacy PM suspend handler: arm wake-on-LAN if enabled, quiesce
 * interrupts and the recovery worker, close the interface, tear down
 * rings, and power the PCI device down to the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4679
/* Legacy PM resume handler: re-enable the PCI device, wait for FW,
 * re-init the function (mirror of be_suspend), restart the recovery
 * worker and disarm wake-on-LAN. Returns 0 or a negative errno.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	/* restore config space saved by be_ctrl_init()/be_suspend() */
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here - a setup
	 * failure leaves the interface attached but non-functional; confirm
	 * whether this is intentional
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4721
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown handler: stop the workers, detach the netdev, FLR the
 * function so it stops DMAing, and disable the PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4741
/* AER/EEH error-detected callback: on the first report, mark the error,
 * stop the recovery worker, close and tear down the interface; then tell
 * the EEH core whether to disconnect or proceed to a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* EEH can report the same error more than once; tear down only on
	 * the first report
	 */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4780
/* EEH .slot_reset callback: runs after the PCI core has reset the slot.
 * Re-enable the device, restore its config space and wait for the firmware
 * to become ready again.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success (be_eeh_resume() follows),
 * or PCI_ERS_RESULT_DISCONNECT if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Restore bus mastering, power state and the config space that was
	 * saved before the error (see be_eeh_resume()/probe paths).
	 */
	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear the AER status so stale errors don't re-trigger recovery */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4807
/* EEH .resume callback: final stage of EEH recovery, after a successful
 * slot reset.  Re-initializes the function and, if the interface was up,
 * reopens it and re-attaches the netdev.  On any failure the device is
 * simply left detached (only an error message is logged).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Save the now-good config space for any future recovery cycle */
	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	/* Interface was up before the error; bring it back up */
	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the recovery worker cancelled in be_eeh_err_detected() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4844
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004845static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004846 .error_detected = be_eeh_err_detected,
4847 .slot_reset = be_eeh_reset,
4848 .resume = be_eeh_resume,
4849};
4850
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004851static struct pci_driver be_driver = {
4852 .name = DRV_NAME,
4853 .id_table = be_dev_ids,
4854 .probe = be_probe,
4855 .remove = be_remove,
4856 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004857 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004858 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004859 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004860};
4861
4862static int __init be_init_module(void)
4863{
Joe Perches8e95a202009-12-03 07:58:21 +00004864 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4865 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004866 printk(KERN_WARNING DRV_NAME
4867 " : Module param rx_frag_size must be 2048/4096/8192."
4868 " Using 2048\n");
4869 rx_frag_size = 2048;
4870 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004871
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004872 return pci_register_driver(&be_driver);
4873}
4874module_init(be_init_module);
4875
/* Module exit point: unregister the PCI driver, which in turn invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);