blob: 04ac9c6a0d3972d4e18ee91a8ce3a8bf2e141cd7 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
MODULE_VERSION(DRV_VER);
/* NOTE(review): MODULE_DEVICE_TABLE(pci, be_dev_ids) appears again right
 * after the be_dev_ids definition below; one of the two is redundant —
 * confirm and drop one.
 */
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* Number of PCI VFs to initialize; S_IRUGO => visible read-only in sysfs */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of one rx fragment buffer (bytes); default 2048, read-only in sysfs */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
/* PCI device IDs claimed by this driver (BE and OC variants) */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
/* NOTE(review): duplicated near the top of the file as well — see above */
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the UE (unrecoverable error) status low register;
 * presumably indexed by bit position — verify against the error-decode code
 * that walks this table.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names for the UE status high register bits; trailing "Unknown" entries
 * cover reserved/undocumented bit positions.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000128 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 mem->va = NULL;
132 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 return 0;
149}
150
Somnath Kotur68c45a22013-03-14 02:42:07 +0000151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168}
169
Somnath Kotur68c45a22013-03-14 02:42:07 +0000170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
/* Ring the RQ doorbell: tell HW that @posted new buffers are available on
 * rx queue @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Ensure the posted ring entries are visible in memory before the
	 * doorbell write reaches the device.
	 */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
195
/* Ring the TX ULP doorbell: tell HW that @posted new WRBs are queued on
 * @txo's tx queue. Doorbell offset is per-txo.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Make the queued WRBs visible in memory before ringing the bell */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
206
/* Notify the event queue doorbell: optionally re-arm the EQ, clear the
 * interrupt, and acknowledge @num_popped consumed event entries.
 * Skips the MMIO write entirely once an EEH error has been flagged.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* High-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Notify the completion queue doorbell: optionally re-arm the CQ and
 * acknowledge @num_popped consumed completion entries. Non-static: used
 * by other files of this driver. Skips the write after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* High-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* Change the interface MAC address via FW commands.
 * NOTE(review): the signature matches an .ndo_set_mac_address handler —
 * confirm against the netdev_ops table elsewhere in this file.
 * Adds the new PMAC first, deletes the old one only on success, then
 * queries the FW to confirm the new MAC actually became active (a VF
 * without FILTMGMT privilege may silently fail to change it).
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0 (BE2) FW stats layout into the driver's chip-agnostic
 * drv_stats. The response is converted from LE in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; fold both
	 * into the single drv_stats counter.
	 */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are per-port in the rxf block on this layout */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) FW stats layout into drv_stats. Same pattern as the
 * v0 variant; v1 adds pmem-fifo, priority-pause and per-port jabber fields.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 FW stats layout into drv_stats. Superset of v1; also pulls
 * the RoCE counters when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy the Lancer per-port (pport) stats layout into drv_stats. Lancer
 * uses a different FW response format from the BEx chips; several 64-bit
 * counters are read via their _lo halves only.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): rx_fifo_overflow feeds both input-fifo and rxpp-fifo
	 * drv counters below — confirm this double-accounting is intended.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000527
Sathya Perla09c1c682011-08-22 19:41:53 +0000528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
Jingoo Han4188e7d2013-08-05 18:02:02 +0900540static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000554void be_parse_stats(struct be_adapter *adapter)
555{
Ajit Khaparde61000862013-10-03 16:16:33 -0500556 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000557 struct be_rx_obj *rxo;
558 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000560
Sathya Perlaca34fe32012-11-06 17:48:56 +0000561 if (lancer_chip(adapter)) {
562 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000563 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000564 if (BE2_chip(adapter))
565 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500566 else if (BE3_chip(adapter))
567 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000568 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500569 else
570 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000571
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000573 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000574 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000576 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000577 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000578}
579
/* .ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the FW-derived drv_stats error counters into @stats.
 * Per-queue counters are read under the u64_stats seqcount retry loop so
 * 64-bit values are consistent on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648 struct net_device *netdev = adapter->netdev;
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000651 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659}
660
Sathya Perla3c8def92011-06-12 20:01:58 +0000661static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663{
Sathya Perla3c8def92011-06-12 20:01:58 +0000664 struct be_tx_stats *stats = tx_stats(txo);
665
Sathya Perlaab1594e2011-07-25 19:10:15 +0000666 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 /* to account for hdr wrb */
685 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000702 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703}
704
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
Somnath Koturcc4ce022010-10-21 07:11:14 -0700721static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000722 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700723{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000724 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700725
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700726 memset(hdr, 0, sizeof(*hdr));
727
728 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
729
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000730 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700731 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
732 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
733 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000734 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000735 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
737 if (is_tcp_pkt(skb))
738 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
739 else if (is_udp_pkt(skb))
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
741 }
742
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700743 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700746 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700747 }
748
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000749 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
750 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700752 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
753 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
754}
755
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000764 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000765 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000770 }
771}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772
/* Fill the TX queue with WRBs for @skb: one header WRB, one for the
 * linear data (if any), one per page fragment, and an optional dummy
 * WRB for even-count padding.
 * Returns the number of data bytes mapped, or 0 on a DMA mapping
 * failure - in which case every mapping made so far is undone and the
 * queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* remember where the data WRBs start, for unwinding on error */
	map_head = txq->head;

	/* map the linear (head) portion, when present */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length pad WRB to keep the count even (BE chips) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unmap everything mapped so far, walking from map_head; only the
	 * first WRB may have been dma_map_single()'d
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
839
Somnath Kotur93040ae2012-06-26 22:32:10 +0000840static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000841 struct sk_buff *skb,
842 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000843{
844 u16 vlan_tag = 0;
845
846 skb = skb_share_check(skb, GFP_ATOMIC);
847 if (unlikely(!skb))
848 return skb;
849
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000850 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000851 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530852
853 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
854 if (!vlan_tag)
855 vlan_tag = adapter->pvid;
856 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
857 * skip VLAN insertion
858 */
859 if (skip_hw_vlan)
860 *skip_hw_vlan = true;
861 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000862
863 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400864 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000865 if (unlikely(!skb))
866 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000867 skb->vlan_tci = 0;
868 }
869
870 /* Insert the outer VLAN, if any */
871 if (adapter->qnq_vid) {
872 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400873 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000874 if (unlikely(!skb))
875 return skb;
876 if (skip_hw_vlan)
877 *skip_hw_vlan = true;
878 }
879
Somnath Kotur93040ae2012-06-26 22:32:10 +0000880 return skb;
881}
882
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
Sathya Perlaee9c7992013-05-22 23:04:55 +0000910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000914}
915
Sathya Perlaee9c7992013-05-22 23:04:55 +0000916static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
917 struct sk_buff *skb,
918 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700919{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000920 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000921 unsigned int eth_hdr_len;
922 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000923
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500924 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
Somnath Kotur48265662013-05-26 21:08:47 +0000925 * may cause a transmit stall on that port. So the work-around is to
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500926 * pad short packets (<= 32 bytes) to a 36-byte length.
Somnath Kotur48265662013-05-26 21:08:47 +0000927 */
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500928 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Somnath Kotur48265662013-05-26 21:08:47 +0000929 if (skb_padto(skb, 36))
930 goto tx_drop;
931 skb->len = 36;
932 }
933
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000934 /* For padded packets, BE HW modifies tot_len field in IP header
935 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000936 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000937 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000938 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
939 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000940 if (skb->len <= 60 &&
941 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000942 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000943 ip = (struct iphdr *)ip_hdr(skb);
944 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
945 }
946
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000947 /* If vlan tag is already inlined in the packet, skip HW VLAN
948 * tagging in UMC mode
949 */
950 if ((adapter->function_mode & UMC_ENABLED) &&
951 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000952 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000953
Somnath Kotur93040ae2012-06-26 22:32:10 +0000954 /* HW has a bug wherein it will calculate CSUM for VLAN
955 * pkts even though it is disabled.
956 * Manually insert VLAN in pkt.
957 */
958 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000959 vlan_tx_tag_present(skb)) {
960 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000961 if (unlikely(!skb))
962 goto tx_drop;
963 }
964
965 /* HW may lockup when VLAN HW tagging is requested on
966 * certain ipv6 packets. Drop such pkts if the HW workaround to
967 * skip HW tagging is not enabled by FW.
968 */
969 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000970 (adapter->pvid || adapter->qnq_vid) &&
971 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000972 goto tx_drop;
973
974 /* Manual VLAN tag insertion to prevent:
975 * ASIC lockup when the ASIC inserts VLAN tag into
976 * certain ipv6 packets. Insert VLAN tags in driver,
977 * and set event, completion, vlan bits accordingly
978 * in the Tx WRB.
979 */
980 if (be_ipv6_tx_stall_chk(adapter, skb) &&
981 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000982 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000983 if (unlikely(!skb))
984 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000985 }
986
Sathya Perlaee9c7992013-05-22 23:04:55 +0000987 return skb;
988tx_drop:
989 dev_kfree_skb_any(skb);
990 return NULL;
991}
992
/* ndo_start_xmit handler: apply TX workarounds, build the WRBs and ring
 * the TX doorbell. The queue is stopped *before* notifying HW when it
 * can no longer hold a maximally-fragmented skb, so completion
 * processing can safely wake it.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround path freed the skb; count it as a driver drop */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: roll back the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1041
1042static int be_change_mtu(struct net_device *netdev, int new_mtu)
1043{
1044 struct be_adapter *adapter = netdev_priv(netdev);
1045 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001046 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1047 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048 dev_info(&adapter->pdev->dev,
1049 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001050 BE_MIN_MTU,
1051 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 return -EINVAL;
1053 }
1054 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1055 netdev->mtu, new_mtu);
1056 netdev->mtu = new_mtu;
1057 return 0;
1058}
1059
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* Program the currently-tracked VLAN IDs into the HW filter table.
 * Falls back to VLAN promiscuous mode when the table would overflow or
 * the F/W reports insufficient resources; re-disables promiscuous mode
 * once filtering succeeds again. Returns the command status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode; nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1119
Patrick McHardy80d5c362013-04-19 02:04:28 +00001120static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001121{
1122 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001123 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001124
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001125 /* Packets with VID 0 are always received by Lancer by default */
1126 if (lancer_chip(adapter) && vid == 0)
1127 goto ret;
1128
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301130 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001131
Somnath Kotura6b74e02014-01-21 15:50:55 +05301132 status = be_vid_config(adapter);
1133 if (status) {
1134 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001135 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301136 }
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001137ret:
1138 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001139}
1140
Patrick McHardy80d5c362013-04-19 02:04:28 +00001141static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142{
1143 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001144 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001146 /* Packets with VID 0 are always received by Lancer by default */
1147 if (lancer_chip(adapter) && vid == 0)
1148 goto ret;
1149
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001150 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301151 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001152 if (!status)
1153 adapter->vlans_added--;
1154 else
1155 adapter->vlan_tag[vid] = 1;
1156ret:
1157 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001158}
1159
Sathya Perlaa54769f2011-10-24 02:45:00 +00001160static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001161{
1162 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001163 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164
1165 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001166 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001167 adapter->promiscuous = true;
1168 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001170
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001171 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001172 if (adapter->promiscuous) {
1173 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001174 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001175
1176 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001177 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001178 }
1179
Sathya Perlae7b909a2009-11-22 22:01:10 +00001180 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001181 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla92bf14a2013-08-27 16:57:32 +05301182 netdev_mc_count(netdev) > be_max_mc(adapter)) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001183 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001184 goto done;
1185 }
1186
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001187 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1188 struct netdev_hw_addr *ha;
1189 int i = 1; /* First slot is claimed by the Primary MAC */
1190
1191 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1192 be_cmd_pmac_del(adapter, adapter->if_handle,
1193 adapter->pmac_id[i], 0);
1194 }
1195
Sathya Perla92bf14a2013-08-27 16:57:32 +05301196 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001197 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1198 adapter->promiscuous = true;
1199 goto done;
1200 }
1201
1202 netdev_for_each_uc_addr(ha, adapter->netdev) {
1203 adapter->uc_macs++; /* First slot is for Primary MAC */
1204 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1205 adapter->if_handle,
1206 &adapter->pmac_id[adapter->uc_macs], 0);
1207 }
1208 }
1209
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001210 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1211
1212 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1213 if (status) {
1214 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1215 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1216 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1217 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001218done:
1219 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001220}
1221
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001222static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1223{
1224 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001225 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001226 int status;
1227
Sathya Perla11ac75e2011-12-13 00:58:50 +00001228 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001229 return -EPERM;
1230
Sathya Perla11ac75e2011-12-13 00:58:50 +00001231 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001232 return -EINVAL;
1233
Sathya Perla3175d8c2013-07-23 15:25:03 +05301234 if (BEx_chip(adapter)) {
1235 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1236 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001237
Sathya Perla11ac75e2011-12-13 00:58:50 +00001238 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1239 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301240 } else {
1241 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1242 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001243 }
1244
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001245 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001246 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1247 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001248 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001249 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001250
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001251 return status;
1252}
1253
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001254static int be_get_vf_config(struct net_device *netdev, int vf,
1255 struct ifla_vf_info *vi)
1256{
1257 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001258 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001259
Sathya Perla11ac75e2011-12-13 00:58:50 +00001260 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001261 return -EPERM;
1262
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001264 return -EINVAL;
1265
1266 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001268 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1269 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001271
1272 return 0;
1273}
1274
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001275static int be_set_vf_vlan(struct net_device *netdev,
1276 int vf, u16 vlan, u8 qos)
1277{
1278 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001279 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001280 int status = 0;
1281
Sathya Perla11ac75e2011-12-13 00:58:50 +00001282 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001283 return -EPERM;
1284
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001285 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001286 return -EINVAL;
1287
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001288 if (vlan || qos) {
1289 vlan |= qos << VLAN_PRIO_SHIFT;
1290 if (vf_cfg->vlan_tag != vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001291 /* If this is new value, program it. Else skip. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001292 vf_cfg->vlan_tag = vlan;
1293 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1294 vf_cfg->if_handle, 0);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001295 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001296 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001297 /* Reset Transparent Vlan Tagging. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001298 vf_cfg->vlan_tag = 0;
1299 vlan = vf_cfg->def_vid;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001300 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001301 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001302 }
1303
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001304
1305 if (status)
1306 dev_info(&adapter->pdev->dev,
1307 "VLAN %d config on VF %d failed\n", vlan, vf);
1308 return status;
1309}
1310
Ajit Khapardee1d18732010-07-23 01:52:13 +00001311static int be_set_vf_tx_rate(struct net_device *netdev,
1312 int vf, int rate)
1313{
1314 struct be_adapter *adapter = netdev_priv(netdev);
1315 int status = 0;
1316
Sathya Perla11ac75e2011-12-13 00:58:50 +00001317 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001318 return -EPERM;
1319
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001320 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001321 return -EINVAL;
1322
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001323 if (rate < 100 || rate > 10000) {
1324 dev_err(&adapter->pdev->dev,
1325 "tx rate must be between 100 and 10000 Mbps\n");
1326 return -EINVAL;
1327 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001328
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001329 if (lancer_chip(adapter))
1330 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1331 else
1332 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001333
1334 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001335 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001336 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001337 else
1338 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001339 return status;
1340}
1341
Sathya Perla2632baf2013-10-01 16:00:00 +05301342static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1343 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344{
Sathya Perla2632baf2013-10-01 16:00:00 +05301345 aic->rx_pkts_prev = rx_pkts;
1346 aic->tx_reqs_prev = tx_pkts;
1347 aic->jiffies = now;
1348}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001349
/* Adaptive interrupt coalescing: recompute the EQ delay for every event
 * queue from the rx+tx packet rate seen since the previous run, and push
 * all changed delays to the FW in a single command.
 * NOTE(review): delta below is 0 if two calls land in the same jiffy;
 * assumes the caller invokes this on a slow periodic cadence — confirm.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: fall back to the static eqd value */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Snapshot the rx counter consistently vs concurrent writers */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		/* Same for the tx request counter */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second over the sample window */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Below a minimum rate, disable coalescing entirely */
		if (eqd < 8)
			eqd = 0;
		/* Clamp to the configured [min_eqd, max_eqd] range */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only if the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* One FW command covers every modified EQ */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1416
Sathya Perla3abcded2010-10-03 22:12:27 -07001417static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001418 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001419{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001420 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001421
Sathya Perlaab1594e2011-07-25 19:10:15 +00001422 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001423 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001424 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001425 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001426 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001427 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001428 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001429 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001430 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001431}
1432
Sathya Perla2e588f82011-03-11 02:49:26 +00001433static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001434{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001435 /* L4 checksum is not reliable for non TCP/UDP packets.
1436 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001437 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1438 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001439}
1440
/* Consume the RX frag descriptor at the queue tail and return its
 * page-info entry.  The backing big page is DMA-unmapped when this
 * frag is the last user of that page.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted frag must always have a page attached */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		/* Last frag of this big page: release the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	/* Advance past the consumed descriptor */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1462
1463/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001464static void be_rx_compl_discard(struct be_rx_obj *rxo,
1465 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001467 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001468 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001470 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301471 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001472 put_page(page_info->page);
1473 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001474 }
1475}
1476
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment is copied (tiny packets) or
 * header-split, remaining fragments are attached as page frags, with
 * frags from the same physical page coalesced into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the ethernet header; the rest of the first
		 * fragment stays in the page and is attached as frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1550
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001551/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301552static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001553 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001555 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001556 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001558
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001559 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001560 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001561 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001562 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563 return;
1564 }
1565
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001566 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001567
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001568 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001569 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001570 else
1571 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001573 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001574 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001575 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001576 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301577 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001578
Jiri Pirko343e43c2011-08-25 02:50:51 +00001579 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001580 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001581
1582 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583}
1584
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received fragments directly to the napi frags skb and
 * feed it to the GRO engine (no header copy).
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb: recycle the completion's frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* j tracks the current skb frag slot; starts at -1 so the first
	 * iteration always opens slot 0 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as slot j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose csum passed in HW */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1640
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001641static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1642 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001643{
Sathya Perla2e588f82011-03-11 02:49:26 +00001644 rxcp->pkt_size =
1645 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1646 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1647 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1648 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001649 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001650 rxcp->ip_csum =
1651 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1652 rxcp->l4_csum =
1653 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1654 rxcp->ipv6 =
1655 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001656 rxcp->num_rcvd =
1657 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1658 rxcp->pkt_type =
1659 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001660 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001661 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001662 if (rxcp->vlanf) {
1663 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001664 compl);
1665 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1666 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001667 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001668 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001669}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001670
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001671static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1672 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001673{
1674 rxcp->pkt_size =
1675 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1676 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1677 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1678 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001679 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001680 rxcp->ip_csum =
1681 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1682 rxcp->l4_csum =
1683 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1684 rxcp->ipv6 =
1685 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001686 rxcp->num_rcvd =
1687 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1688 rxcp->pkt_type =
1689 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001690 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001691 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001692 if (rxcp->vlanf) {
1693 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001694 compl);
1695 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1696 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001697 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001698 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001699 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1700 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001701}
1702
/* Fetch and parse the next valid RX completion from rxo's completion
 * queue, or return NULL when none is pending.  The parsed fields are
 * returned in the per-rxo rxcp scratch struct.
 * NOTE(review): only the v0 parser writes rxcp->ip_frag; on be3_native
 * hardware it is never set by the v1 parser — confirm rxo->rxcp starts
 * zeroed so the field reads as 0 there.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order: read the compl body only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The HW L4 checksum is meaningless on IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress the pvid tag unless that vid was configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1745
Eric Dumazet1829b082011-03-01 05:48:12 +00001746static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001748 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001749
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001750 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001751 gfp |= __GFP_COMP;
1752 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753}
1754
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.  Stops early on allocation/mapping failure; the
 * rx_post_starved flag asks be_worker to replenish later if the queue
 * ran completely dry.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Fill only empty slots; a non-NULL page means the ring is full here */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size slice off the same page */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the frag's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop ended mid-page: the last posted frag owns the unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell the HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1825
/* Fetch the next valid TX completion from tx_cq, or NULL when none is
 * pending.  The consumed entry's valid bit is cleared so it is not
 * processed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Order: read the compl body only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1841
/* Reclaim the wrbs of one transmitted skb: walk txo's queue from its
 * tail up to last_index, DMA-unmapping each frag wrb, then free the skb.
 * Returns the number of wrbs consumed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is stored at the slot of its first (header) wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb's linear header only for the first data wrb */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1873
/* Return the number of events in the event queue.
 * Consumes EQ entries (clearing each one) until a zero entry is seen.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read the event word before clearing it */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1893
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001894/* Leaves the EQ is disarmed state */
1895static void be_eq_clean(struct be_eq_obj *eqo)
1896{
1897 int num = events_get(eqo);
1898
1899 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1900}
1901
/* Drain the RX completion queue and free all posted-but-unused RX page
 * buffers.  Called while the interface is being brought down.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					"did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1950
/* Wait (up to ~200ms) for all outstanding TX completions on every TX
 * queue and process them; afterwards forcibly unmap and free any posted
 * skbs whose completions never arrived.  Called on interface down.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
						end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* reset the per-txq accumulators */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Derive the last wrb index from the skb itself since
			 * no completion arrived to report it
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2009
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002010static void be_evt_queues_destroy(struct be_adapter *adapter)
2011{
2012 struct be_eq_obj *eqo;
2013 int i;
2014
2015 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002016 if (eqo->q.created) {
2017 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002018 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302019 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302020 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002021 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002022 be_queue_free(adapter, &eqo->q);
2023 }
2024}
2025
2026static int be_evt_queues_create(struct be_adapter *adapter)
2027{
2028 struct be_queue_info *eq;
2029 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302030 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002031 int i, rc;
2032
Sathya Perla92bf14a2013-08-27 16:57:32 +05302033 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2034 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002035
2036 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302037 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2038 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302039 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302040 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002041 eqo->adapter = adapter;
2042 eqo->tx_budget = BE_TX_BUDGET;
2043 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302044 aic->max_eqd = BE_MAX_EQD;
2045 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002046
2047 eq = &eqo->q;
2048 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2049 sizeof(struct be_eq_entry));
2050 if (rc)
2051 return rc;
2052
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302053 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002054 if (rc)
2055 return rc;
2056 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002057 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002058}
2059
Sathya Perla5fb379e2009-06-18 00:02:59 +00002060static void be_mcc_queues_destroy(struct be_adapter *adapter)
2061{
2062 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002063
Sathya Perla8788fdc2009-07-27 22:52:03 +00002064 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002065 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002066 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002067 be_queue_free(adapter, q);
2068
Sathya Perla8788fdc2009-07-27 22:52:03 +00002069 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002070 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002071 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002072 be_queue_free(adapter, q);
2073}
2074
2075/* Must be called only after TX qs are created as MCC shares TX EQ */
2076static int be_mcc_queues_create(struct be_adapter *adapter)
2077{
2078 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002079
Sathya Perla8788fdc2009-07-27 22:52:03 +00002080 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002081 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00002082 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002083 goto err;
2084
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002085 /* Use the default EQ for MCC completions */
2086 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002087 goto mcc_cq_free;
2088
Sathya Perla8788fdc2009-07-27 22:52:03 +00002089 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002090 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2091 goto mcc_cq_destroy;
2092
Sathya Perla8788fdc2009-07-27 22:52:03 +00002093 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002094 goto mcc_q_free;
2095
2096 return 0;
2097
2098mcc_q_free:
2099 be_queue_free(adapter, q);
2100mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002101 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002102mcc_cq_free:
2103 be_queue_free(adapter, cq);
2104err:
2105 return -1;
2106}
2107
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002108static void be_tx_queues_destroy(struct be_adapter *adapter)
2109{
2110 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002111 struct be_tx_obj *txo;
2112 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002113
Sathya Perla3c8def92011-06-12 20:01:58 +00002114 for_all_tx_queues(adapter, txo, i) {
2115 q = &txo->q;
2116 if (q->created)
2117 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2118 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002119
Sathya Perla3c8def92011-06-12 20:01:58 +00002120 q = &txo->cq;
2121 if (q->created)
2122 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2123 be_queue_free(adapter, q);
2124 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002125}
2126
Sathya Perla77071332013-08-27 16:57:34 +05302127static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002128{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002129 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002130 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302131 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132
Sathya Perla92bf14a2013-08-27 16:57:32 +05302133 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002134
Sathya Perla3c8def92011-06-12 20:01:58 +00002135 for_all_tx_queues(adapter, txo, i) {
2136 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002137 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2138 sizeof(struct be_eth_tx_compl));
2139 if (status)
2140 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002141
John Stultz827da442013-10-07 15:51:58 -07002142 u64_stats_init(&txo->stats.sync);
2143 u64_stats_init(&txo->stats.sync_compl);
2144
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002145 /* If num_evt_qs is less than num_tx_qs, then more than
2146 * one txq share an eq
2147 */
2148 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2149 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2150 if (status)
2151 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002152
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002153 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2154 sizeof(struct be_eth_wrb));
2155 if (status)
2156 return status;
2157
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002158 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002159 if (status)
2160 return status;
2161 }
2162
Sathya Perlad3791422012-09-28 04:39:44 +00002163 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2164 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002165 return 0;
2166}
2167
2168static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169{
2170 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002171 struct be_rx_obj *rxo;
2172 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002173
Sathya Perla3abcded2010-10-03 22:12:27 -07002174 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002175 q = &rxo->cq;
2176 if (q->created)
2177 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2178 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180}
2181
/* Allocate and create one RX CQ per RX queue.  As many RSS rings as
 * there are EQs are used; when RSS is used (>1 ring), an extra default
 * RXQ is added for non-IP traffic.  Returns 0 or a negative error.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Distribute the RX CQs across the available EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2218
/* INTx ISR: count pending events and hand processing off to NAPI */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2250
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002251static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002253 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254
Sathya Perla0b545a62012-11-23 00:27:18 +00002255 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2256 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257 return IRQ_HANDLED;
2258}
2259
Sathya Perla2e588f82011-03-11 02:49:26 +00002260static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002261{
Somnath Koture38b1702013-05-29 22:55:56 +00002262 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002263}
2264
/* Process up to @budget RX completions from @rxo's CQ, delivering pkts
 * to the stack (via GRO when eligible, except while busy-polling), and
 * replenish the RX queue when it runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2320
/* Process up to @budget TX completions on @txo, freeing completed skbs
 * and waking the netdev sub-queue @idx if it was flow-stopped and the
 * TX queue has drained to below half full.
 * Returns true when fewer than @budget completions were found (done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002353
/* NAPI poll handler shared by all EQs: services the TX queues mapped to
 * this EQ, then its RX queues (skipped when busy-poll owns the EQ), and
 * MCC completions for the MCC EQ.  The EQ is re-armed only when all the
 * work fit within @budget; otherwise events are cleared but the EQ is
 * left unarmed so polling continues.  Returns the amount of work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the RX queues; stay in polling mode */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2398
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) RX path: poll each RX queue on this EQ for up
 * to 4 completions.  Returns the work done, or LL_FLUSH_BUSY when the
 * EQ is currently owned by the regular NAPI poller.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int work_done = 0, i;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work_done = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work_done)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work_done;
}
#endif
2420
/* Check the adapter for unrecoverable hardware errors: SLIPORT status
 * registers on Lancer, UE (unrecoverable error) status registers on
 * other chips.  Logs any error bits found and latches adapter->hw_error
 * for SLIPORT errors.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Nothing more to do if an error was already detected */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Mask off the UE bits flagged as ignorable */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if its a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2496
Sathya Perla8d56ff12009-11-22 22:02:26 +00002497static void be_msix_disable(struct be_adapter *adapter)
2498{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002499 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002500 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002501 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302502 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002503 }
2504}
2505
/* Enable MSI-x, requesting enough vectors for the NIC event queues and
 * (when supported) RoCE.  On a partial grant, retries with the granted
 * count.  Returns 0 on success and also on PF failure (PFs fall back to
 * INTx); VFs return the error since INTx is not supported there.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors actually
		 * available; retry with that smaller count.
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* When RoCE is supported, half the granted vectors go to RoCE */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2554
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002555static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002556 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002557{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302558 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002559}
2560
2561static int be_msix_register(struct be_adapter *adapter)
2562{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002563 struct net_device *netdev = adapter->netdev;
2564 struct be_eq_obj *eqo;
2565 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002566
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002567 for_all_evt_queues(adapter, eqo, i) {
2568 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2569 vec = be_msix_vec_get(adapter, eqo);
2570 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002571 if (status)
2572 goto err_msix;
2573 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002574
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002576err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002577 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2578 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2579 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2580 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002581 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002582 return status;
2583}
2584
2585static int be_irq_register(struct be_adapter *adapter)
2586{
2587 struct net_device *netdev = adapter->netdev;
2588 int status;
2589
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002590 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591 status = be_msix_register(adapter);
2592 if (status == 0)
2593 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002594 /* INTx is not supported for VF */
2595 if (!be_physfn(adapter))
2596 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002597 }
2598
Sathya Perlae49cc342012-11-27 19:50:02 +00002599 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600 netdev->irq = adapter->pdev->irq;
2601 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002602 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002603 if (status) {
2604 dev_err(&adapter->pdev->dev,
2605 "INTx request IRQ failed - err %d\n", status);
2606 return status;
2607 }
2608done:
2609 adapter->isr_registered = true;
2610 return 0;
2611}
2612
2613static void be_irq_unregister(struct be_adapter *adapter)
2614{
2615 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002616 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002617 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002618
2619 if (!adapter->isr_registered)
2620 return;
2621
2622 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002623 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002624 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002625 goto done;
2626 }
2627
2628 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002629 for_all_evt_queues(adapter, eqo, i)
2630 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002631
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002632done:
2633 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002634}
2635
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002636static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002637{
2638 struct be_queue_info *q;
2639 struct be_rx_obj *rxo;
2640 int i;
2641
2642 for_all_rx_queues(adapter, rxo, i) {
2643 q = &rxo->q;
2644 if (q->created) {
2645 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002646 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002647 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002648 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002649 }
2650}
2651
Sathya Perla889cd4b2010-05-30 23:33:45 +00002652static int be_close(struct net_device *netdev)
2653{
2654 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002655 struct be_eq_obj *eqo;
2656 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002657
Parav Pandit045508a2012-03-26 14:27:13 +00002658 be_roce_dev_close(adapter);
2659
Ivan Veceradff345c52013-11-27 08:59:32 +01002660 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2661 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00002662 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302663 be_disable_busy_poll(eqo);
2664 }
David S. Miller71237b62013-11-28 18:53:36 -05002665 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00002666 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002667
2668 be_async_mcc_disable(adapter);
2669
2670 /* Wait for all pending tx completions to arrive so that
2671 * all tx skbs are freed.
2672 */
Sathya Perlafba87552013-05-08 02:05:50 +00002673 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302674 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002675
2676 be_rx_qs_destroy(adapter);
2677
Ajit Khaparded11a3472013-11-18 10:44:37 -06002678 for (i = 1; i < (adapter->uc_macs + 1); i++)
2679 be_cmd_pmac_del(adapter, adapter->if_handle,
2680 adapter->pmac_id[i], 0);
2681 adapter->uc_macs = 0;
2682
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002683 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002684 if (msix_enabled(adapter))
2685 synchronize_irq(be_msix_vec_get(adapter, eqo));
2686 else
2687 synchronize_irq(netdev->irq);
2688 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002689 }
2690
Sathya Perla889cd4b2010-05-30 23:33:45 +00002691 be_irq_unregister(adapter);
2692
Sathya Perla482c9e72011-06-29 23:33:17 +00002693 return 0;
2694}
2695
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002696static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002697{
2698 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002699 int rc, i, j;
2700 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002701
2702 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002703 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2704 sizeof(struct be_eth_rx_d));
2705 if (rc)
2706 return rc;
2707 }
2708
2709 /* The FW would like the default RXQ to be created first */
2710 rxo = default_rxo(adapter);
2711 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2712 adapter->if_handle, false, &rxo->rss_id);
2713 if (rc)
2714 return rc;
2715
2716 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002717 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002718 rx_frag_size, adapter->if_handle,
2719 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002720 if (rc)
2721 return rc;
2722 }
2723
2724 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002725 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2726 for_all_rss_queues(adapter, rxo, i) {
2727 if ((j + i) >= 128)
2728 break;
2729 rsstable[j + i] = rxo->rss_id;
2730 }
2731 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002732 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2733 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2734
2735 if (!BEx_chip(adapter))
2736 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2737 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302738 } else {
2739 /* Disable RSS, if only default RX Q is created */
2740 adapter->rss_flags = RSS_ENABLE_NONE;
2741 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002742
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302743 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2744 128);
2745 if (rc) {
2746 adapter->rss_flags = RSS_ENABLE_NONE;
2747 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002748 }
2749
2750 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002751 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002752 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002753 return 0;
2754}
2755
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002756static int be_open(struct net_device *netdev)
2757{
2758 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002759 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002760 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002761 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002762 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002763 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002764
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002765 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002766 if (status)
2767 goto err;
2768
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002769 status = be_irq_register(adapter);
2770 if (status)
2771 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002772
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002773 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002774 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002775
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002776 for_all_tx_queues(adapter, txo, i)
2777 be_cq_notify(adapter, txo->cq.id, true, 0);
2778
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002779 be_async_mcc_enable(adapter);
2780
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 for_all_evt_queues(adapter, eqo, i) {
2782 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302783 be_enable_busy_poll(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002784 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2785 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002786 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002787
Sathya Perla323ff712012-09-28 04:39:43 +00002788 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002789 if (!status)
2790 be_link_status_update(adapter, link_status);
2791
Sathya Perlafba87552013-05-08 02:05:50 +00002792 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002793 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002794 return 0;
2795err:
2796 be_close(adapter->netdev);
2797 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002798}
2799
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002800static int be_setup_wol(struct be_adapter *adapter, bool enable)
2801{
2802 struct be_dma_mem cmd;
2803 int status = 0;
2804 u8 mac[ETH_ALEN];
2805
2806 memset(mac, 0, ETH_ALEN);
2807
2808 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002809 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2810 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002811 if (cmd.va == NULL)
2812 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002813
2814 if (enable) {
2815 status = pci_write_config_dword(adapter->pdev,
2816 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2817 if (status) {
2818 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002819 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002820 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2821 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002822 return status;
2823 }
2824 status = be_cmd_enable_magic_wol(adapter,
2825 adapter->netdev->dev_addr, &cmd);
2826 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2827 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2828 } else {
2829 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2830 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2831 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2832 }
2833
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002834 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002835 return status;
2836}
2837
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002838/*
2839 * Generate a seed MAC address from the PF MAC Address using jhash.
2840 * MAC Address for VFs are assigned incrementally starting from the seed.
2841 * These addresses are programmed in the ASIC by the PF and the VF driver
2842 * queries for the MAC address during its probe.
2843 */
Sathya Perla4c876612013-02-03 20:30:11 +00002844static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002845{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002846 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002847 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002848 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002849 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002850
2851 be_vf_eth_addr_generate(adapter, mac);
2852
Sathya Perla11ac75e2011-12-13 00:58:50 +00002853 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302854 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002855 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002856 vf_cfg->if_handle,
2857 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302858 else
2859 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2860 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002861
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002862 if (status)
2863 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002864 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002865 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002866 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002867
2868 mac[5] += 1;
2869 }
2870 return status;
2871}
2872
Sathya Perla4c876612013-02-03 20:30:11 +00002873static int be_vfs_mac_query(struct be_adapter *adapter)
2874{
2875 int status, vf;
2876 u8 mac[ETH_ALEN];
2877 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002878
2879 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302880 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2881 mac, vf_cfg->if_handle,
2882 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002883 if (status)
2884 return status;
2885 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2886 }
2887 return 0;
2888}
2889
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002890static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002891{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002892 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002893 u32 vf;
2894
Sathya Perla257a3fe2013-06-14 15:54:51 +05302895 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002896 dev_warn(&adapter->pdev->dev,
2897 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002898 goto done;
2899 }
2900
Sathya Perlab4c1df92013-05-08 02:05:47 +00002901 pci_disable_sriov(adapter->pdev);
2902
Sathya Perla11ac75e2011-12-13 00:58:50 +00002903 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302904 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002905 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2906 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302907 else
2908 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2909 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002910
Sathya Perla11ac75e2011-12-13 00:58:50 +00002911 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2912 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002913done:
2914 kfree(adapter->vf_cfg);
2915 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002916}
2917
Sathya Perla77071332013-08-27 16:57:34 +05302918static void be_clear_queues(struct be_adapter *adapter)
2919{
2920 be_mcc_queues_destroy(adapter);
2921 be_rx_cqs_destroy(adapter);
2922 be_tx_queues_destroy(adapter);
2923 be_evt_queues_destroy(adapter);
2924}
2925
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302926static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002927{
Sathya Perla191eb752012-02-23 18:50:13 +00002928 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2929 cancel_delayed_work_sync(&adapter->work);
2930 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2931 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302932}
2933
Somnath Koturb05004a2013-12-05 12:08:16 +05302934static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302935{
2936 int i;
2937
Somnath Koturb05004a2013-12-05 12:08:16 +05302938 if (adapter->pmac_id) {
2939 for (i = 0; i < (adapter->uc_macs + 1); i++)
2940 be_cmd_pmac_del(adapter, adapter->if_handle,
2941 adapter->pmac_id[i], 0);
2942 adapter->uc_macs = 0;
2943
2944 kfree(adapter->pmac_id);
2945 adapter->pmac_id = NULL;
2946 }
2947}
2948
/* Tear down the adapter's software/FW state: stop the worker first
 * (presumably so it cannot touch queues mid-destruction — confirm against
 * the worker body), then clear VFs, MAC filters, the interface, all queues
 * and MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2966
Sathya Perla4c876612013-02-03 20:30:11 +00002967static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002968{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302969 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002970 struct be_vf_cfg *vf_cfg;
2971 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002972 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002973
Sathya Perla4c876612013-02-03 20:30:11 +00002974 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2975 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002976
Sathya Perla4c876612013-02-03 20:30:11 +00002977 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302978 if (!BE3_chip(adapter)) {
2979 status = be_cmd_get_profile_config(adapter, &res,
2980 vf + 1);
2981 if (!status)
2982 cap_flags = res.if_cap_flags;
2983 }
Sathya Perla4c876612013-02-03 20:30:11 +00002984
2985 /* If a FW profile exists, then cap_flags are updated */
2986 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2987 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2988 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2989 &vf_cfg->if_handle, vf + 1);
2990 if (status)
2991 goto err;
2992 }
2993err:
2994 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002995}
2996
Sathya Perla39f1d942012-05-08 19:41:24 +00002997static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002998{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002999 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003000 int vf;
3001
Sathya Perla39f1d942012-05-08 19:41:24 +00003002 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3003 GFP_KERNEL);
3004 if (!adapter->vf_cfg)
3005 return -ENOMEM;
3006
Sathya Perla11ac75e2011-12-13 00:58:50 +00003007 for_all_vfs(adapter, vf_cfg, vf) {
3008 vf_cfg->if_handle = -1;
3009 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003010 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003011 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003012}
3013
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003014static int be_vf_setup(struct be_adapter *adapter)
3015{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003016 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003017 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00003018 int status, old_vfs, vf;
3019 struct device *dev = &adapter->pdev->dev;
Sathya Perla04a06022013-07-23 15:25:00 +05303020 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003021
Sathya Perla257a3fe2013-06-14 15:54:51 +05303022 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00003023 if (old_vfs) {
3024 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3025 if (old_vfs != num_vfs)
3026 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3027 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00003028 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303029 if (num_vfs > be_max_vfs(adapter))
Sathya Perla4c876612013-02-03 20:30:11 +00003030 dev_info(dev, "Device supports %d VFs and not %d\n",
Sathya Perla92bf14a2013-08-27 16:57:32 +05303031 be_max_vfs(adapter), num_vfs);
3032 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
Sathya Perlab4c1df92013-05-08 02:05:47 +00003033 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00003034 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003035 }
3036
3037 status = be_vf_setup_init(adapter);
3038 if (status)
3039 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003040
Sathya Perla4c876612013-02-03 20:30:11 +00003041 if (old_vfs) {
3042 for_all_vfs(adapter, vf_cfg, vf) {
3043 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3044 if (status)
3045 goto err;
3046 }
3047 } else {
3048 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003049 if (status)
3050 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003051 }
3052
Sathya Perla4c876612013-02-03 20:30:11 +00003053 if (old_vfs) {
3054 status = be_vfs_mac_query(adapter);
3055 if (status)
3056 goto err;
3057 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00003058 status = be_vf_eth_addr_config(adapter);
3059 if (status)
3060 goto err;
3061 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003062
Sathya Perla11ac75e2011-12-13 00:58:50 +00003063 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303064 /* Allow VFs to programs MAC/VLAN filters */
3065 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3066 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3067 status = be_cmd_set_fn_privileges(adapter,
3068 privileges |
3069 BE_PRIV_FILTMGMT,
3070 vf + 1);
3071 if (!status)
3072 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3073 vf);
3074 }
3075
Sathya Perla4c876612013-02-03 20:30:11 +00003076 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3077 * Allow full available bandwidth
3078 */
3079 if (BE3_chip(adapter) && !old_vfs)
3080 be_cmd_set_qos(adapter, 1000, vf+1);
3081
3082 status = be_cmd_link_status_query(adapter, &lnk_speed,
3083 NULL, vf + 1);
3084 if (!status)
3085 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003086
3087 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003088 vf + 1, vf_cfg->if_handle, NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003089 if (status)
3090 goto err;
3091 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00003092
Vasundhara Volam05998632013-10-01 15:59:59 +05303093 if (!old_vfs)
3094 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003095 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003096
3097 if (!old_vfs) {
3098 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3099 if (status) {
3100 dev_err(dev, "SRIOV enable failed\n");
3101 adapter->num_vfs = 0;
3102 goto err;
3103 }
3104 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003105 return 0;
3106err:
Sathya Perla4c876612013-02-03 20:30:11 +00003107 dev_err(dev, "VF setup failed\n");
3108 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003109 return status;
3110}
3111
Sathya Perla92bf14a2013-08-27 16:57:32 +05303112/* On BE2/BE3 FW does not suggest the supported limits */
3113static void BEx_get_resources(struct be_adapter *adapter,
3114 struct be_resources *res)
3115{
3116 struct pci_dev *pdev = adapter->pdev;
3117 bool use_sriov = false;
Suresh Reddye3dc8672014-01-06 13:02:25 +05303118 int max_vfs;
3119
3120 max_vfs = pci_sriov_get_totalvfs(pdev);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303121
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303122 if (BE3_chip(adapter) && sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303123 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303124 use_sriov = res->max_vfs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303125 }
3126
3127 if (be_physfn(adapter))
3128 res->max_uc_mac = BE_UC_PMAC_COUNT;
3129 else
3130 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3131
3132 if (adapter->function_mode & FLEX10_MODE)
3133 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde1aa96732013-09-27 15:18:16 -05003134 else if (adapter->function_mode & UMC_ENABLED)
3135 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303136 else
3137 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3138 res->max_mcast_mac = BE_MAX_MC;
3139
Vasundhara Volam30f3fe42013-10-01 15:59:58 +05303140 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303141 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
Vasundhara Volam30f3fe42013-10-01 15:59:58 +05303142 !be_physfn(adapter) || (adapter->port_num > 1))
Sathya Perla92bf14a2013-08-27 16:57:32 +05303143 res->max_tx_qs = 1;
3144 else
3145 res->max_tx_qs = BE3_MAX_TX_QS;
3146
3147 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3148 !use_sriov && be_physfn(adapter))
3149 res->max_rss_qs = (adapter->be3_native) ?
3150 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3151 res->max_rx_qs = res->max_rss_qs + 1;
3152
Suresh Reddye3dc8672014-01-06 13:02:25 +05303153 if (be_physfn(adapter))
3154 res->max_evt_qs = (max_vfs > 0) ?
3155 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3156 else
3157 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303158
3159 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3160 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3161 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3162}
3163
Sathya Perla30128032011-11-10 19:17:57 +00003164static void be_setup_init(struct be_adapter *adapter)
3165{
3166 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003167 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003168 adapter->if_handle = -1;
3169 adapter->be3_native = false;
3170 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003171 if (be_physfn(adapter))
3172 adapter->cmd_privileges = MAX_PRIVILEGES;
3173 else
3174 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003175}
3176
Sathya Perla92bf14a2013-08-27 16:57:32 +05303177static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003178{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303179 struct device *dev = &adapter->pdev->dev;
3180 struct be_resources res = {0};
3181 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003182
Sathya Perla92bf14a2013-08-27 16:57:32 +05303183 if (BEx_chip(adapter)) {
3184 BEx_get_resources(adapter, &res);
3185 adapter->res = res;
3186 }
3187
Sathya Perla92bf14a2013-08-27 16:57:32 +05303188 /* For Lancer, SH etc read per-function resource limits from FW.
3189 * GET_FUNC_CONFIG returns per function guaranteed limits.
3190 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3191 */
Sathya Perla4c876612013-02-03 20:30:11 +00003192 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303193 status = be_cmd_get_func_config(adapter, &res);
3194 if (status)
3195 return status;
3196
3197 /* If RoCE may be enabled stash away half the EQs for RoCE */
3198 if (be_roce_supported(adapter))
3199 res.max_evt_qs /= 2;
3200 adapter->res = res;
3201
3202 if (be_physfn(adapter)) {
3203 status = be_cmd_get_profile_config(adapter, &res, 0);
3204 if (status)
3205 return status;
3206 adapter->res.max_vfs = res.max_vfs;
3207 }
3208
3209 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3210 be_max_txqs(adapter), be_max_rxqs(adapter),
3211 be_max_rss(adapter), be_max_eqs(adapter),
3212 be_max_vfs(adapter));
3213 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3214 be_max_uc(adapter), be_max_mc(adapter),
3215 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003216 }
3217
Sathya Perla92bf14a2013-08-27 16:57:32 +05303218 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003219}
3220
Sathya Perla39f1d942012-05-08 19:41:24 +00003221/* Routine to query per function resource limits */
3222static int be_get_config(struct be_adapter *adapter)
3223{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303224 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003225 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003226
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003227 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3228 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003229 &adapter->function_caps,
3230 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003231 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303232 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003233
Vasundhara Volam542963b2014-01-15 13:23:33 +05303234 if (be_physfn(adapter)) {
3235 status = be_cmd_get_active_profile(adapter, &profile_id);
3236 if (!status)
3237 dev_info(&adapter->pdev->dev,
3238 "Using profile 0x%x\n", profile_id);
3239 }
3240
Sathya Perla92bf14a2013-08-27 16:57:32 +05303241 status = be_get_resources(adapter);
3242 if (status)
3243 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003244
3245 /* primary mac needs 1 pmac entry */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303246 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3247 GFP_KERNEL);
3248 if (!adapter->pmac_id)
3249 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003250
Sathya Perla92bf14a2013-08-27 16:57:32 +05303251 /* Sanitize cfg_num_qs based on HW and platform limits */
3252 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3253
3254 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003255}
3256
Sathya Perla95046b92013-07-23 15:25:02 +05303257static int be_mac_setup(struct be_adapter *adapter)
3258{
3259 u8 mac[ETH_ALEN];
3260 int status;
3261
3262 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3263 status = be_cmd_get_perm_mac(adapter, mac);
3264 if (status)
3265 return status;
3266
3267 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3268 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3269 } else {
3270 /* Maybe the HW was reset; dev_addr must be re-programmed */
3271 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3272 }
3273
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003274 /* For BE3-R VFs, the PF programs the initial MAC address */
3275 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3276 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3277 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303278 return 0;
3279}
3280
/* Start the periodic (1 second) worker and record that it is running */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3286
/* Create all HW queues (event queues, TX queues, RX CQs, MCC queues)
 * and publish the actual ring counts to the net stack. Callers must
 * hold rtnl_lock since netif_set_real_num_tx/rx_queues() requires it.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3321
/* Tear down and re-create all queues (closing and re-opening the netdev
 * around the operation if it was running). MSI-X is re-programmed only
 * when no vectors are shared with RoCE.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3357
/* Full HW/SW bring-up: query config, enable MSI-X, create the interface
 * and all queues, program MAC/VLAN/flow-control, optionally set up
 * SR-IOV, and start the periodic worker. On any failure everything set
 * up so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the capability flags this function supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Warn on pre-4.0 BE2 firmware: IRQs may not work (see message) */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-apply the VLAN filter configuration, if any VLANs were added */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Push the requested flow-control only if it differs from HW */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3438
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: ring every event queue's doorbell and schedule
 * its NAPI context so pending events are processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3454
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Signature that marks the flash section directory inside a UFI image;
 * only ever compared against (memcmp), so keep it const/.rodata.
 */
static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003457
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003458static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003459 const u8 *p, u32 img_start, int image_size,
3460 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003461{
3462 u32 crc_offset;
3463 u8 flashed_crc[4];
3464 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003465
3466 crc_offset = hdr_size + img_start + image_size - 4;
3467
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003468 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003469
3470 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003471 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003472 if (status) {
3473 dev_err(&adapter->pdev->dev,
3474 "could not get crc from flash, not flashing redboot\n");
3475 return false;
3476 }
3477
3478 /*update redboot only if crc does not match*/
3479 if (!memcmp(flashed_crc, p, 4))
3480 return false;
3481 else
3482 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003483}
3484
Sathya Perla306f1342011-08-02 19:57:45 +00003485static bool phy_flashing_required(struct be_adapter *adapter)
3486{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003487 return (adapter->phy.phy_type == TN_8022 &&
3488 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003489}
3490
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003491static bool is_comp_in_ufi(struct be_adapter *adapter,
3492 struct flash_section_info *fsec, int type)
3493{
3494 int i = 0, img_type = 0;
3495 struct flash_section_info_g2 *fsec_g2 = NULL;
3496
Sathya Perlaca34fe32012-11-06 17:48:56 +00003497 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003498 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3499
3500 for (i = 0; i < MAX_FLASH_COMP; i++) {
3501 if (fsec_g2)
3502 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3503 else
3504 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3505
3506 if (img_type == type)
3507 return true;
3508 }
3509 return false;
3510
3511}
3512
Jingoo Han4188e7d2013-08-05 18:02:02 +09003513static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003514 int header_size,
3515 const struct firmware *fw)
3516{
3517 struct flash_section_info *fsec = NULL;
3518 const u8 *p = fw->data;
3519
3520 p += header_size;
3521 while (p < (fw->data + fw->size)) {
3522 fsec = (struct flash_section_info *)p;
3523 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3524 return fsec;
3525 p += 32;
3526 }
3527 return NULL;
3528}
3529
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003530static int be_flash(struct be_adapter *adapter, const u8 *img,
3531 struct be_dma_mem *flash_cmd, int optype, int img_size)
3532{
3533 u32 total_bytes = 0, flash_op, num_bytes = 0;
3534 int status = 0;
3535 struct be_cmd_write_flashrom *req = flash_cmd->va;
3536
3537 total_bytes = img_size;
3538 while (total_bytes) {
3539 num_bytes = min_t(u32, 32*1024, total_bytes);
3540
3541 total_bytes -= num_bytes;
3542
3543 if (!total_bytes) {
3544 if (optype == OPTYPE_PHY_FW)
3545 flash_op = FLASHROM_OPER_PHY_FLASH;
3546 else
3547 flash_op = FLASHROM_OPER_FLASH;
3548 } else {
3549 if (optype == OPTYPE_PHY_FW)
3550 flash_op = FLASHROM_OPER_PHY_SAVE;
3551 else
3552 flash_op = FLASHROM_OPER_SAVE;
3553 }
3554
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003555 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003556 img += num_bytes;
3557 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3558 flash_op, num_bytes);
3559 if (status) {
3560 if (status == ILLEGAL_IOCTL_REQ &&
3561 optype == OPTYPE_PHY_FW)
3562 break;
3563 dev_err(&adapter->pdev->dev,
3564 "cmd to write to flash rom failed.\n");
3565 return status;
3566 }
3567 }
3568 return 0;
3569}
3570
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003571/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003572static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003573 const struct firmware *fw,
3574 struct be_dma_mem *flash_cmd,
3575 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003576
Ajit Khaparde84517482009-09-04 03:12:16 +00003577{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003578 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003579 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003580 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003581 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003582 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003583 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003584
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003585 struct flash_comp gen3_flash_types[] = {
3586 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3587 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3588 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3589 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3590 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3591 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3592 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3593 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3594 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3595 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3596 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3597 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3598 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3599 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3600 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3601 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3602 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3603 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3604 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3605 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003606 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003607
3608 struct flash_comp gen2_flash_types[] = {
3609 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3610 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3611 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3612 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3613 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3614 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3615 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3616 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3617 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3618 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3619 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3620 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3621 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3622 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3623 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3624 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003625 };
3626
Sathya Perlaca34fe32012-11-06 17:48:56 +00003627 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003628 pflashcomp = gen3_flash_types;
3629 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003630 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003631 } else {
3632 pflashcomp = gen2_flash_types;
3633 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003634 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003635 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003636
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003637 /* Get flash section info*/
3638 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3639 if (!fsec) {
3640 dev_err(&adapter->pdev->dev,
3641 "Invalid Cookie. UFI corrupted ?\n");
3642 return -1;
3643 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003644 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003645 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003646 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003647
3648 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3649 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3650 continue;
3651
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003652 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3653 !phy_flashing_required(adapter))
3654 continue;
3655
3656 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3657 redboot = be_flash_redboot(adapter, fw->data,
3658 pflashcomp[i].offset, pflashcomp[i].size,
3659 filehdr_size + img_hdrs_size);
3660 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003661 continue;
3662 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003663
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003664 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003665 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003666 if (p + pflashcomp[i].size > fw->data + fw->size)
3667 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003668
3669 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3670 pflashcomp[i].size);
3671 if (status) {
3672 dev_err(&adapter->pdev->dev,
3673 "Flashing section type %d failed.\n",
3674 pflashcomp[i].img_type);
3675 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003676 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003677 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003678 return 0;
3679}
3680
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003681static int be_flash_skyhawk(struct be_adapter *adapter,
3682 const struct firmware *fw,
3683 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003684{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003685 int status = 0, i, filehdr_size = 0;
3686 int img_offset, img_size, img_optype, redboot;
3687 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3688 const u8 *p = fw->data;
3689 struct flash_section_info *fsec = NULL;
3690
3691 filehdr_size = sizeof(struct flash_file_hdr_g3);
3692 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3693 if (!fsec) {
3694 dev_err(&adapter->pdev->dev,
3695 "Invalid Cookie. UFI corrupted ?\n");
3696 return -1;
3697 }
3698
3699 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3700 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3701 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3702
3703 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3704 case IMAGE_FIRMWARE_iSCSI:
3705 img_optype = OPTYPE_ISCSI_ACTIVE;
3706 break;
3707 case IMAGE_BOOT_CODE:
3708 img_optype = OPTYPE_REDBOOT;
3709 break;
3710 case IMAGE_OPTION_ROM_ISCSI:
3711 img_optype = OPTYPE_BIOS;
3712 break;
3713 case IMAGE_OPTION_ROM_PXE:
3714 img_optype = OPTYPE_PXE_BIOS;
3715 break;
3716 case IMAGE_OPTION_ROM_FCoE:
3717 img_optype = OPTYPE_FCOE_BIOS;
3718 break;
3719 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3720 img_optype = OPTYPE_ISCSI_BACKUP;
3721 break;
3722 case IMAGE_NCSI:
3723 img_optype = OPTYPE_NCSI_FW;
3724 break;
3725 default:
3726 continue;
3727 }
3728
3729 if (img_optype == OPTYPE_REDBOOT) {
3730 redboot = be_flash_redboot(adapter, fw->data,
3731 img_offset, img_size,
3732 filehdr_size + img_hdrs_size);
3733 if (!redboot)
3734 continue;
3735 }
3736
3737 p = fw->data;
3738 p += filehdr_size + img_offset + img_hdrs_size;
3739 if (p + img_size > fw->data + fw->size)
3740 return -1;
3741
3742 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3743 if (status) {
3744 dev_err(&adapter->pdev->dev,
3745 "Flashing section type %d failed.\n",
3746 fsec->fsec_entry[i].type);
3747 return status;
3748 }
3749 }
3750 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003751}
3752
/* Download a Lancer FW image in 32KB chunks via the write-object cmd,
 * then commit it with a zero-length write. If FW indicates a reset is
 * needed to activate the new image, the adapter is reset here;
 * otherwise (for other change-status values) a reboot is advised.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose length is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW reports how much it consumed; advance by that amount */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3850
Sathya Perlaca34fe32012-11-06 17:48:56 +00003851#define UFI_TYPE2 2
3852#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003853#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003854#define UFI_TYPE4 4
3855static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003856 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003857{
3858 if (fhdr == NULL)
3859 goto be_get_ufi_exit;
3860
Sathya Perlaca34fe32012-11-06 17:48:56 +00003861 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3862 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003863 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3864 if (fhdr->asic_type_rev == 0x10)
3865 return UFI_TYPE3R;
3866 else
3867 return UFI_TYPE3;
3868 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003869 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003870
3871be_get_ufi_exit:
3872 dev_err(&adapter->pdev->dev,
3873 "UFI and Interface are not compatible for flashing\n");
3874 return -1;
3875}
3876
/* Flash a BE2/BE3/Skyhawk (non-Lancer) UFI image: determine the UFI
 * flavour from its header, then dispatch to the matching flash routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer holding one flash-write command at a time */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Flash only the image(s) with imageid == 1 */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* UFI_TYPE2 images are flashed without per-image iteration
	 * (num_of_images passed as 0)
	 */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3945
3946int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3947{
3948 const struct firmware *fw;
3949 int status;
3950
3951 if (!netif_running(adapter->netdev)) {
3952 dev_err(&adapter->pdev->dev,
3953 "Firmware load not allowed (interface is down)\n");
3954 return -1;
3955 }
3956
3957 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3958 if (status)
3959 goto fw_exit;
3960
3961 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3962
3963 if (lancer_chip(adapter))
3964 status = lancer_fw_download(adapter, fw);
3965 else
3966 status = be_fw_download(adapter, fw);
3967
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003968 if (!status)
3969 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3970 adapter->fw_on_flash);
3971
Ajit Khaparde84517482009-09-04 03:12:16 +00003972fw_exit:
3973 release_firmware(fw);
3974 return status;
3975}
3976
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003977static int be_ndo_bridge_setlink(struct net_device *dev,
3978 struct nlmsghdr *nlh)
3979{
3980 struct be_adapter *adapter = netdev_priv(dev);
3981 struct nlattr *attr, *br_spec;
3982 int rem;
3983 int status = 0;
3984 u16 mode = 0;
3985
3986 if (!sriov_enabled(adapter))
3987 return -EOPNOTSUPP;
3988
3989 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3990
3991 nla_for_each_nested(attr, br_spec, rem) {
3992 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3993 continue;
3994
3995 mode = nla_get_u16(attr);
3996 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3997 return -EINVAL;
3998
3999 status = be_cmd_set_hsw_config(adapter, 0, 0,
4000 adapter->if_handle,
4001 mode == BRIDGE_MODE_VEPA ?
4002 PORT_FWD_TYPE_VEPA :
4003 PORT_FWD_TYPE_VEB);
4004 if (status)
4005 goto err;
4006
4007 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4008 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4009
4010 return status;
4011 }
4012err:
4013 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4014 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4015
4016 return status;
4017}
4018
4019static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4020 struct net_device *dev,
4021 u32 filter_mask)
4022{
4023 struct be_adapter *adapter = netdev_priv(dev);
4024 int status = 0;
4025 u8 hsw_mode;
4026
4027 if (!sriov_enabled(adapter))
4028 return 0;
4029
4030 /* BE and Lancer chips support VEB mode only */
4031 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4032 hsw_mode = PORT_FWD_TYPE_VEB;
4033 } else {
4034 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4035 adapter->if_handle, &hsw_mode);
4036 if (status)
4037 return 0;
4038 }
4039
4040 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4041 hsw_mode == PORT_FWD_TYPE_VEPA ?
4042 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4043}
4044
/* netdev callback table for all benet devices */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
4069
/* One-time netdev setup before register_netdev(): advertise offload
 * feature flags, GSO limits and hook up the netdev/ethtool ops.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* user-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything above is enabled by default, plus VLAN RX/filtering
	 * which is always on (not user-toggleable)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* leave room for the Ethernet header within the 64K GSO limit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4096
/* Undo the pci_iomap()s done in be_map_pci_bars(); safe to call with
 * either mapping absent (NULL).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
4104
/* Return the PCI BAR index that exposes the doorbell registers:
 * BAR 0 on Lancer chips and on VFs, BAR 4 on BE PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4112
4113static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004114{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004115 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004116 adapter->roce_db.size = 4096;
4117 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4118 db_bar(adapter));
4119 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4120 db_bar(adapter));
4121 }
Parav Pandit045508a2012-03-26 14:27:13 +00004122 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004123}
4124
/* ioremap the CSR BAR (BEx PF only) and the doorbell BAR, then record
 * the RoCE doorbell region. Returns 0 on success or -ENOMEM; any
 * mapping made before a failure is unwound.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* only BEx physical functions expose CSR registers (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4147
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004148static void be_ctrl_cleanup(struct be_adapter *adapter)
4149{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004150 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004151
4152 be_unmap_pci_bars(adapter);
4153
4154 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004155 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4156 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004157
Sathya Perla5b8821b2011-08-02 19:57:44 +00004158 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004159 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004160 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4161 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004162}
4163
/* Initialize the control path: cache SLI identity, map BARs, allocate
 * the fw mailbox and rx_filter DMA buffers, and init the locks and
 * completion used by the command interface.
 * Returns 0 or a negative errno; on failure everything acquired so far
 * is released via the goto ladder below.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* read SLI family and PF/VF identity from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to a 16-byte boundary below
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4222
4223static void be_stats_cleanup(struct be_adapter *adapter)
4224{
Sathya Perla3abcded2010-10-03 22:12:27 -07004225 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004226
4227 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004228 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4229 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004230}
4231
4232static int be_stats_init(struct be_adapter *adapter)
4233{
Sathya Perla3abcded2010-10-03 22:12:27 -07004234 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004235
Sathya Perlaca34fe32012-11-06 17:48:56 +00004236 if (lancer_chip(adapter))
4237 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4238 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004239 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004240 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004241 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004242 else
4243 /* ALL non-BE ASICs */
4244 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004245
Joe Perchesede23fa2013-08-26 22:45:23 -07004246 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4247 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004248 if (cmd->va == NULL)
4249 return -1;
4250 return 0;
4251}
4252
/* PCI remove handler: tear down in the reverse order of be_probe().
 * The ordering is significant: RoCE and interrupts first, then the
 * recovery worker, then netdev unregistration, then fw/ctrl/PCI state.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4283
/* Fetch initial configuration from the adapter: controller attributes,
 * temperature-poll frequency, fw log level (BEx only) and the default
 * queue count. Returns 0 or a negative status from the fw command.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		/* map the fw log level onto netif msg_enable bits */
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4304
/* Attempt recovery of a Lancer adapter after an error: wait for the fw
 * to report ready, then tear down and rebuild the whole NIC setup,
 * re-opening the netdev if it was running.
 * Returns 0 on success or a negative errno; -EAGAIN means the fw is
 * still provisioning resources and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4341
/* Periodic (1s) error-detection work item. On a Lancer hw error it
 * detaches the netdev and runs lancer_recover_func(); on success the
 * netdev is re-attached. Re-arms itself unless recovery failed with a
 * non-retryable status.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4368
/* Periodic (1s) housekeeping worker: reap MCC completions while the
 * interface is down, refresh fw statistics, poll die temperature (PF
 * only, rate-limited), replenish starved RX queues and update EQ
 * delays. Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* kick off the next async stats query once the previous completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature query only every be_get_temp_freq iterations */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4411
Sathya Perla257a3fe2013-06-14 15:54:51 +05304412/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004413static bool be_reset_required(struct be_adapter *adapter)
4414{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304415 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004416}
4417
Sathya Perlad3791422012-09-28 04:39:44 +00004418static char *mc_name(struct be_adapter *adapter)
4419{
4420 if (adapter->function_mode & FLEX10_MODE)
4421 return "FLEX10";
4422 else if (adapter->function_mode & VNIC_MODE)
4423 return "vNIC";
4424 else if (adapter->function_mode & UMC_ENABLED)
4425 return "UMC";
4426 else
4427 return "";
4428}
4429
/* Label this PCI function as "PF" or "VF" for log messages. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4434
/* PCI probe handler: bring up one be2net function.
 * Enables the PCI device, allocates the netdev, sets up DMA masks and
 * AER, initializes the control path and fw, allocates stats buffers,
 * configures the NIC (be_setup) and registers the netdev. All failures
 * unwind through the goto ladder at the bottom in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled only on the PF; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4556
/* Legacy PM suspend handler: arm wake-on-LAN if enabled, quiesce the
 * interface and recovery worker, tear down the NIC config and put the
 * device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4581
/* Legacy PM resume handler: re-enable the device, wait for fw, rebuild
 * the NIC config, restart the recovery worker and disarm wake-on-LAN.
 * Returns 0 or a negative errno from an early init step.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() statuses are ignored here;
	 * presumably intentional best-effort resume — confirm.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4623
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4643
/* PCI EEH callback: an uncorrectable error was detected on the slot.
 * On the first notification quiesce the driver (stop recovery work,
 * detach/close the netdev, tear down the NIC config), then tell the
 * EEH core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4682
/* PCI EEH callback: the slot has been reset. Re-enable the device,
 * restore PCI state and wait for the fw to become ready; report
 * RECOVERED so the EEH core proceeds to be_eeh_resume(), or DISCONNECT
 * if the card cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4709
/* PCI EEH callback: traffic may flow again. Reset and re-init the fw,
 * rebuild the NIC config, re-open the netdev if it was running and
 * restart the recovery worker. On any failure just log — the device
 * stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4746
/* PCI error-recovery (EEH/AER) callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4752
/* be2net PCI driver descriptor */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4763
4764static int __init be_init_module(void)
4765{
Joe Perches8e95a202009-12-03 07:58:21 +00004766 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4767 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004768 printk(KERN_WARNING DRV_NAME
4769 " : Module param rx_frag_size must be 2048/4096/8192."
4770 " Using 2048\n");
4771 rx_frag_size = 2048;
4772 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004773
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004774 return pci_register_driver(&be_driver);
4775}
4776module_init(be_init_module);
4777
/* Module exit point: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);