blob: 9704a1b23cef0ce07d6e5eaa77da7bf390573171 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla11ac75e2011-12-13 00:58:50 +000037static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Sathya Perla6b7c5b92009-03-11 23:32:03 -070041static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: textual names for each bit of the Unrecoverable
 * Error status-low register, indexed by bit position. Used when logging
 * a HW UE. (Trailing spaces in some entries are deliberate and must be
 * preserved — the strings are printed as-is.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: textual names for each bit of the Unrecoverable
 * Error status-high register, indexed by bit position. Bits 24-31 are
 * reserved/unassigned and report as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000128 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 mem->va = NULL;
132 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 return 0;
149}
150
Somnath Kotur68c45a22013-03-14 02:42:07 +0000151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168}
169
Somnath Kotur68c45a22013-03-14 02:42:07 +0000170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
/* Ring the RX-queue doorbell: tell HW that @posted new rx buffers are
 * available on ring @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Ensure buffer-descriptor writes reach memory before the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
195
/* Ring the TX-queue doorbell: tell HW that @posted new wrbs are queued
 * on @txo. The doorbell offset is per-txq (txo->db_offset).
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Ensure wrb writes reach memory before the doorbell */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
206
/* Notify HW about @num_popped consumed entries on event-queue @qid;
 * optionally re-arm the EQ and/or clear the interrupt. Skipped entirely
 * after an EEH error, when the device must not be touched.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* This doorbell targets the event queue, not the completion queue */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Notify HW about @num_popped consumed entries on completion-queue @qid;
 * optionally re-arm the CQ. Skipped after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* ndo_set_mac_address handler. Programs the new MAC into the FW via
 * PMAC_ADD, deletes the old PMAC entry on success, then confirms the
 * active MAC by querying the FW — only then is netdev->dev_addr updated.
 * Returns 0 on success, -EADDRNOTAVAIL for a malformed address, -EPERM
 * when the FW did not activate the requested MAC, or a FW error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
302
Sathya Perlaca34fe32012-11-06 17:48:56 +0000303/* BE2 supports only v0 cmd */
304static void *hw_stats_from_cmd(struct be_adapter *adapter)
305{
306 if (BE2_chip(adapter)) {
307 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
308
309 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500310 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000311 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
312
313 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500314 } else {
315 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
316
317 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000318 }
319}
320
321/* BE2 supports only v0 cmd */
322static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
323{
324 if (BE2_chip(adapter)) {
325 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
326
327 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500328 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000329 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
330
331 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500332 } else {
333 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
334
335 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000336 }
337}
338
/* Copy the v0 (BE2) HW stats — byte-swapped to CPU order in place —
 * into the adapter's driver-stats mirror. Jabber events are per-port
 * in the rxf block on this layout, so select by port number.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; the
	 * driver exposes their sum
	 */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
387
/* Copy the v1 (BE3) HW stats — byte-swapped to CPU order in place —
 * into the adapter's driver-stats mirror. Unlike v0, jabber events and
 * priority-pause counters are per-port here.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
433
/* Copy the v2 (Skyhawk and newer) HW stats — byte-swapped to CPU order
 * in place — into the adapter's driver-stats mirror. The RoCE counters
 * at the tail are only populated on RoCE-capable adapters.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
487
/* Copy Lancer per-port (pport) stats — byte-swapped to CPU order in
 * place — into the adapter's driver-stats mirror. Lancer exposes most
 * 64-bit counters as lo/hi pairs; only the low words are mirrored here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filtered drops separately; the
	 * driver exposes their sum
	 */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000540 struct be_rx_obj *rxo,
541 u32 erx_stat)
542{
543 if (!BEx_chip(adapter))
544 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
545 else
546 /* below erx HW counter can actually wrap around after
547 * 65535. Driver accumulates a 32-bit value
548 */
549 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
550 (u16)erx_stat);
551}
552
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000553void be_parse_stats(struct be_adapter *adapter)
554{
Ajit Khaparde61000862013-10-03 16:16:33 -0500555 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000556 struct be_rx_obj *rxo;
557 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000558 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559
Sathya Perlaca34fe32012-11-06 17:48:56 +0000560 if (lancer_chip(adapter)) {
561 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000562 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (BE2_chip(adapter))
564 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500565 else if (BE3_chip(adapter))
566 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000567 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else
569 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000570
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000572 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000573 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
574 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000576 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000577}
578
/* ndo_get_stats64 handler. Sums per-ring packet/byte counters —
 * each read under the ring's u64_stats seqcount retry loop so a torn
 * 64-bit read on 32-bit hosts is retried — then derives the aggregate
 * rx error counters from the driver-stats mirror. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent
		 * writer update
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
644
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000645void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647 struct net_device *netdev = adapter->netdev;
648
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000649 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000650 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000651 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000653
654 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
655 netif_carrier_on(netdev);
656 else
657 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658}
659
/* Accumulate per-tx-queue stats for one transmitted request.
 * The u64_stats_update_begin/end pair allows 32-bit readers
 * (the stats64 path above) to fetch a consistent snapshot.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a GSO skb accounts for gso_segs wire pkts; all others for one */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
/* Populate a data WRB with the DMA address and length of one buffer
 * fragment; the WRB is later converted to LE by the caller.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
705 struct sk_buff *skb)
706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
/* Fill the tx header WRB: per-request offload directives (LSO, csum,
 * vlan insertion) plus the total WRB count and byte length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not set for Lancer -- presumably it does
		 * LSOv6 without it; confirm against Lancer spec
		 */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
754
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000755static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000756 bool unmap_single)
757{
758 dma_addr_t dma;
759
760 be_dws_le_to_cpu(wrb, sizeof(*wrb));
761
762 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000763 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000764 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000765 dma_unmap_single(dev, dma, wrb->frag_len,
766 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000767 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000768 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000769 }
770}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771
/* DMA-map the skb's linear part and page frags and build the WRBs
 * (header WRB first, then one per fragment, plus an optional dummy
 * pad WRB) in txq. Returns the number of data bytes mapped, or 0 on
 * a DMA mapping error, in which case all mappings done so far are
 * undone and txq->head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* position of the first data WRB; unwind point on error */
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB pads the request to an even WRB count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Walk the WRBs written so far and unmap them; only the first
	 * one can be a dma_map_single (linear) mapping.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
838
/* Insert the vlan tag (and the outer qnq tag, if configured) into the
 * packet data itself, i.e. s/w tagging instead of HW tagging.
 * May set *skip_hw_vlan to tell the WRB builder to suppress HW tagging.
 * Returns the (possibly reallocated) skb, or NULL if the skb had to be
 * freed by a failing __vlan_put_tag/skb_share_check.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now inline in the frame; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
881
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000882static bool be_ipv6_exthdr_check(struct sk_buff *skb)
883{
884 struct ethhdr *eh = (struct ethhdr *)skb->data;
885 u16 offset = ETH_HLEN;
886
887 if (eh->h_proto == htons(ETH_P_IPV6)) {
888 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
889
890 offset += sizeof(struct ipv6hdr);
891 if (ip6h->nexthdr != NEXTHDR_TCP &&
892 ip6h->nexthdr != NEXTHDR_UDP) {
893 struct ipv6_opt_hdr *ehdr =
894 (struct ipv6_opt_hdr *) (skb->data + offset);
895
896 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
897 if (ehdr->hdrlen == 0xff)
898 return true;
899 }
900 }
901 return false;
902}
903
/* True if a vlan tag would end up on this tx pkt: either the skb
 * carries one, or the function has a pvid/qnq_vid configured.
 */
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
908
/* BE3-only check for the ipv6 ext-header pkts that can stall tx when
 * HW vlan tagging is used (see be_xmit_workarounds).
 */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
914
/* Apply HW/FW erratum workarounds to an skb before WRBs are built.
 * May pad, trim, or s/w-vlan-tag the skb and may set *skip_hw_vlan.
 * Returns the (possibly reallocated) skb, or NULL if the pkt had to
 * be dropped (the skb is freed in that case).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the skb back to the IP datagram length.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
991
/* ndo_start_xmit handler: apply workarounds, build and DMA-map the
 * WRBs, stop the subqueue if it is about to fill, and ring the tx
 * doorbell. Always returns NETDEV_TX_OK (drops are counted, not
 * requeued).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: roll back the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1040
1041static int be_change_mtu(struct net_device *netdev, int new_mtu)
1042{
1043 struct be_adapter *adapter = netdev_priv(netdev);
1044 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001045 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1046 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001047 dev_info(&adapter->pdev->dev,
1048 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001049 BE_MIN_MTU,
1050 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001051 return -EINVAL;
1052 }
1053 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1054 netdev->mtu, new_mtu);
1055 netdev->mtu = new_mtu;
1056 return 0;
1057}
1058
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* filter programming worked; turn vlan-promisc back off
		 * if a previous failure had left it on
		 */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1120
/* ndo_vlan_rx_add_vid handler: record the vid in the driver table and
 * push the updated vlan table to the HW; rolls back on failure.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
1142
/* ndo_vlan_rx_kill_vid handler: clear the vid from the driver table and
 * reprogram the HW vlan table; rolls back on failure.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= be_max_vlans(adapter))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* roll back on failure */
ret:
	return status;
}
1163
/* ndo_set_rx_mode handler: program promiscuous / all-multi / unicast
 * MAC filters in the adapter to match the netdev's current rx mode,
 * falling back to promisc modes when HW filter slots run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* vid programming is skipped while promiscuous (see
		 * be_vid_config); re-push the table now
		 */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously programmed secondary uc MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many uc addrs for the HW table: go fully promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1225
/* ndo_set_vf_mac handler: program a new MAC address for the given VF.
 * BEx chips need a pmac del+add cycle; other chips use a direct
 * set-mac command.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		/* cache the MAC only after the f/w accepted it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1257
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001258static int be_get_vf_config(struct net_device *netdev, int vf,
1259 struct ifla_vf_info *vi)
1260{
1261 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001262 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001263
Sathya Perla11ac75e2011-12-13 00:58:50 +00001264 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001265 return -EPERM;
1266
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001268 return -EINVAL;
1269
1270 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001271 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001272 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1273 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001274 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001275
1276 return 0;
1277}
1278
/* ndo_set_vf_vlan handler: configure transparent vlan tagging for a VF.
 * vlan == 0 and qos == 0 resets tagging back to the default vid.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* fold qos into the prio bits of the tag */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1314
Ajit Khapardee1d18732010-07-23 01:52:13 +00001315static int be_set_vf_tx_rate(struct net_device *netdev,
1316 int vf, int rate)
1317{
1318 struct be_adapter *adapter = netdev_priv(netdev);
1319 int status = 0;
1320
Sathya Perla11ac75e2011-12-13 00:58:50 +00001321 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001322 return -EPERM;
1323
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001324 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001325 return -EINVAL;
1326
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001327 if (rate < 100 || rate > 10000) {
1328 dev_err(&adapter->pdev->dev,
1329 "tx rate must be between 100 and 10000 Mbps\n");
1330 return -EINVAL;
1331 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001332
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001333 if (lancer_chip(adapter))
1334 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1335 else
1336 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001337
1338 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001339 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001340 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001341 else
1342 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001343 return status;
1344}
1345
Sathya Perla2632baf2013-10-01 16:00:00 +05301346static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1347 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348{
Sathya Perla2632baf2013-10-01 16:00:00 +05301349 aic->rx_pkts_prev = rx_pkts;
1350 aic->tx_reqs_prev = tx_pkts;
1351 aic->jiffies = now;
1352}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001353
/* Adaptive interrupt coalescing: recompute the event-queue delay (EQD) for
 * every EQ from the rx/tx packet rate seen since the previous run, clamp it
 * to the configured [min_eqd, max_eqd] range, and push all changed values
 * to the FW in one batched modify-EQD command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: fall back to the ethtool-set EQD */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* fetch a consistent snapshot of the rx packet counter */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		/* ...and of the tx request counter */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* combined rx+tx packets/sec over the sample interval.
		 * NOTE(review): delta is 0 if this runs twice in the same
		 * jiffy; presumably the caller's 1s period prevents the
		 * divide-by-zero - verify against the worker scheduling.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		/* clamp to the user-configured adaptive range */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			/* queue this EQ for the batched FW command */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* one FW command updates all modified EQs */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1420
Sathya Perla3abcded2010-10-03 22:12:27 -07001421static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001422 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001423{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001424 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001425
Sathya Perlaab1594e2011-07-25 19:10:15 +00001426 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001427 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001428 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001429 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001430 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001431 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001432 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001433 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001434 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435}
1436
Sathya Perla2e588f82011-03-11 02:49:26 +00001437static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001438{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001439 /* L4 checksum is not reliable for non TCP/UDP packets.
1440 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001441 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1442 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001443}
1444
/* Consume the page-fragment descriptor at the RX queue tail, i.e. the
 * fragment the current completion refers to. When this fragment is the
 * last user of its backing page, the page's DMA mapping is released.
 * Advances the queue tail and decrements the posted-buffer count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		/* last fragment carved from this page: unmap the whole page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1466
1467/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001468static void be_rx_compl_discard(struct be_rx_obj *rxo,
1469 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001472 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001474 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301475 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001476 put_page(page_info->page);
1477 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 }
1479}
1480
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny packets (<= BE_HDR_LEN) are copied entirely into the skb's linear
 * area; larger packets get only the ethernet header copied, with the
 * payload attached as page fragments. Fragments carved from the same
 * physical page are coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy just the header; the rest stays in the page frag */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* ownership of the page moved to the skb (or was released above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as frag j: drop the extra page reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1554
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb memory: drop the frame but reclaim its RX frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* trust the HW checksum only where it is meaningful for this pkt */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1588
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no skb available: drop the frame but reclaim its frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: starting at -1 wraps, then j++ selects frag slot 0 on
	 * the first iteration */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as frag j: drop the extra page reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1644
/* Extract the fields of a v1 (BE3-native) RX completion descriptor into
 * the HW-independent be_rx_compl_info used by the rest of the RX path.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		/* vlan fields are only populated when the vtp bit is set */
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* NOTE(review): unlike be_parse_rx_compl_v0(), ip_frag is not
	 * extracted here, yet be_rx_compl_get() reads rxcp->ip_frag
	 * unconditionally. This relies on the field staying 0 on the
	 * be3_native path - verify this is intended.
	 */
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674
/* Extract the fields of a v0 (legacy) RX completion descriptor into the
 * HW-independent be_rx_compl_info used by the rest of the RX path.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		/* vlan fields are only populated when the vtp bit is set */
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1706
/* Return the next valid RX completion, parsed into rxo->rxcp, or NULL if
 * the CQ has none. Handles both v0 and v1 descriptor formats, sanitizes
 * the vlan indication, and invalidates the entry before advancing the CQ.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* never trust the L4 checksum indication for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* drop the vlan indication when the tag is the port's pvid
		 * and the vid was not configured in the driver */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1749
Eric Dumazet1829b082011-03-01 05:48:12 +00001750static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001753
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001755 gfp |= __GFP_COMP;
1756 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757}
1758
1759/*
1760 * Allocate a page, split it to fragments of size rx_frag_size and post as
1761 * receive buffers to BE
1762 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001763static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764{
Sathya Perla3abcded2010-10-03 22:12:27 -07001765 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001766 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001767 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001768 struct page *pagep = NULL;
1769 struct be_eth_rx_d *rxd;
1770 u64 page_dmaaddr = 0, frag_dmaaddr;
1771 u32 posted, page_offset = 0;
1772
Sathya Perla3abcded2010-10-03 22:12:27 -07001773 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1775 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001776 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001777 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001778 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001779 break;
1780 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001781 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1782 0, adapter->big_page_size,
1783 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784 page_info->page_offset = 0;
1785 } else {
1786 get_page(pagep);
1787 page_info->page_offset = page_offset + rx_frag_size;
1788 }
1789 page_offset = page_info->page_offset;
1790 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001791 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001792 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1793
1794 rxd = queue_head_node(rxq);
1795 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1796 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001797
1798 /* Any space left in the current big page for another frag? */
1799 if ((page_offset + rx_frag_size + rx_frag_size) >
1800 adapter->big_page_size) {
1801 pagep = NULL;
1802 page_info->last_page_user = true;
1803 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001804
1805 prev_page_info = page_info;
1806 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001807 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001808 }
1809 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001810 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811
1812 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001813 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301814 if (rxo->rx_post_starved)
1815 rxo->rx_post_starved = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001816 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001817 } else if (atomic_read(&rxq->used) == 0) {
1818 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001819 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821}
1822
/* Return the next valid TX completion from @tx_cq, or NULL if the CQ has
 * none. The entry is converted to CPU endianness and its valid word
 * cleared so it is not processed twice; the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* invalidate the entry so it is consumed exactly once */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1838
/* Reclaim one transmitted skb: walk its wrbs from the txq tail up to
 * @last_index, unmapping each data fragment, then free the skb.
 * Returns the number of wrbs consumed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* request header unmapping only for the first data wrb and
		 * only when the skb has linear (header) data */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1870
/* Return the number of events in the event queue.
 * Each consumed entry has its 'evt' word cleared so it reads as empty on
 * the next wrap of the ring.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the 'evt' check before the clearing write below */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1890
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001891/* Leaves the EQ is disarmed state */
1892static void be_eq_clean(struct be_eq_obj *eqo)
1893{
1894 int num = events_get(eqo);
1895
1896 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1897}
1898
/* Drain an RX object's completion queue and release all posted RX buffers.
 * Called on queue teardown; leaves the CQ unarmed and the RXQ empty with
 * head/tail reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is already dead */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1947
/* Drain all TX completion queues on teardown.
 * First waits up to ~200ms for HW to post completions for all in-flight
 * wrbs; then force-frees any posted skbs whose completions never arrived
 * (walking the TXQ to recompute each skb's wrb count).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack consumed compls without re-arming */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's wrb span to locate its last
			 * wrb index, then reclaim it as if completed.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2006
/* Tear down all event queues: drain and destroy each created EQ, remove
 * its NAPI context, and free the queue memory. Queue memory is freed even
 * for EQs that were allocated but never created in HW.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2022
/* Allocate and create the event queues, one per MSI-x vector (capped by
 * the configured queue count), registering a NAPI context for each.
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to unwind via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2056
Sathya Perla5fb379e2009-06-18 00:02:59 +00002057static void be_mcc_queues_destroy(struct be_adapter *adapter)
2058{
2059 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002060
Sathya Perla8788fdc2009-07-27 22:52:03 +00002061 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002062 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002063 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002064 be_queue_free(adapter, q);
2065
Sathya Perla8788fdc2009-07-27 22:52:03 +00002066 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002067 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002068 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002069 be_queue_free(adapter, q);
2070}
2071
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue (on the default EQ) and then the MCC
 * queue itself, unwinding via gotos on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2104
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002105static void be_tx_queues_destroy(struct be_adapter *adapter)
2106{
2107 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002108 struct be_tx_obj *txo;
2109 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002110
Sathya Perla3c8def92011-06-12 20:01:58 +00002111 for_all_tx_queues(adapter, txo, i) {
2112 q = &txo->q;
2113 if (q->created)
2114 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2115 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002116
Sathya Perla3c8def92011-06-12 20:01:58 +00002117 q = &txo->cq;
2118 if (q->created)
2119 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2120 be_queue_free(adapter, q);
2121 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122}
2123
/* Allocate and create the TX queues and their completion queues.
 * num_tx_qs is capped by both the number of EQs and the HW maximum; when
 * there are fewer EQs than TXQs, multiple TXQs share an EQ.
 * Returns 0 on success or a negative status; partial creations are left
 * for the caller's teardown path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2164
2165static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002166{
2167 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002168 struct be_rx_obj *rxo;
2169 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002170
Sathya Perla3abcded2010-10-03 22:12:27 -07002171 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002172 q = &rxo->cq;
2173 if (q->created)
2174 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2175 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002177}
2178
/* Allocate and create the RX completion queues.
 * One RSS ring is created per EQ; when at least 2 RSS rings are possible
 * an extra default RXQ is added for non-IP traffic. RX CQs share EQs
 * round-robin with the TX CQs.
 * Returns 0 on success or a negative status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2215
/* INTx interrupt handler: counts pending events and schedules NAPI.
 * Also implements spurious-interrupt accounting so the kernel does not
 * disable the IRQ line on chips that occasionally raise INTx with the EQ
 * unarmed.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2247
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002248static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002250 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002251
Sathya Perla0b545a62012-11-23 00:27:18 +00002252 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2253 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254 return IRQ_HANDLED;
2255}
2256
Sathya Perla2e588f82011-03-11 02:49:26 +00002257static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258{
Somnath Koture38b1702013-05-29 22:55:56 +00002259 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260}
2261
/* Process up to @budget RX completions on @rxo, delivering packets to the
 * stack (via GRO when eligible, unless invoked from busy-poll).
 * Flush completions, zero-length (partial-DMA) completions, and packets
 * mis-delivered to the wrong port (BE promiscuous quirk) are discarded.
 * Acks consumed completions (re-arming the CQ) and replenishes RX buffers
 * unless the queue is in post_starved state. Returns number processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2317
/* Process up to @budget TX completions on @txo (netdev tx-queue index
 * @idx): reclaim completed wrbs, ack the CQ (re-armed), and wake the
 * subqueue if it was stopped for lack of wrbs and is now half empty.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002350
/* NAPI poll handler for one EQ: services all TXQs mapped to this EQ, then
 * all RXQs (if the NAPI/busy-poll lock is available; otherwise stays in
 * polling mode), and MCC completions on the MCC EQ. Re-arms the EQ only
 * when all work fit in the budget; otherwise clears events and keeps
 * polling. Returns the amount of work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the RX rings; stay in polling mode */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2395
#ifdef CONFIG_NET_RX_BUSY_POLL
/* ndo_busy_poll handler: poll the RX queues of this EQ (up to 4 pkts per
 * queue, stopping at the first queue that yields work) while holding the
 * busy-poll lock. Returns LL_FLUSH_BUSY if NAPI currently owns the rings,
 * else the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2417
/* Detect and log fatal adapter errors.
 * On Lancer chips, reads the SLIPORT status/error registers; on BE chips,
 * reads the UE (unrecoverable error) status registers via PCI config
 * space, masking out bits the platform declares ignorable.
 * Sets adapter->hw_error only for SLIPORT errors; BE UEs are logged but
 * do not set hw_error (some platforms report spurious UEs). Logging is
 * suppressed when the error signature matches an in-progress FW reset.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already flagged; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Keep only the error bits not masked by the platform */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if its a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Log each set UE bit by name */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2493
Sathya Perla8d56ff12009-11-22 22:02:26 +00002494static void be_msix_disable(struct be_adapter *adapter)
2495{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002496 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002497 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002498 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302499 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002500 }
2501}
2502
/* Enable MSI-x, asking first for the full desired vector count and
 * retrying with whatever count the legacy pci_enable_msix() reports as
 * available (its positive return value). When RoCE is supported, half of
 * the granted vectors (beyond the minimum) are reserved for RoCE.
 * Returns 0 on success or when a PF can fall back to INTx; returns the
 * failure status for VFs, where INTx is not supported.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* Positive return = number of vectors actually available;
		 * retry with that count.
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2551
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002552static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002553 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002554{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302555 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002556}
2557
2558static int be_msix_register(struct be_adapter *adapter)
2559{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002560 struct net_device *netdev = adapter->netdev;
2561 struct be_eq_obj *eqo;
2562 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002563
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002564 for_all_evt_queues(adapter, eqo, i) {
2565 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2566 vec = be_msix_vec_get(adapter, eqo);
2567 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002568 if (status)
2569 goto err_msix;
2570 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002571
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002572 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002573err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002574 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2575 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2576 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2577 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002578 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002579 return status;
2580}
2581
2582static int be_irq_register(struct be_adapter *adapter)
2583{
2584 struct net_device *netdev = adapter->netdev;
2585 int status;
2586
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002587 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002588 status = be_msix_register(adapter);
2589 if (status == 0)
2590 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002591 /* INTx is not supported for VF */
2592 if (!be_physfn(adapter))
2593 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002594 }
2595
Sathya Perlae49cc342012-11-27 19:50:02 +00002596 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002597 netdev->irq = adapter->pdev->irq;
2598 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002599 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600 if (status) {
2601 dev_err(&adapter->pdev->dev,
2602 "INTx request IRQ failed - err %d\n", status);
2603 return status;
2604 }
2605done:
2606 adapter->isr_registered = true;
2607 return 0;
2608}
2609
2610static void be_irq_unregister(struct be_adapter *adapter)
2611{
2612 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002613 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002614 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002615
2616 if (!adapter->isr_registered)
2617 return;
2618
2619 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002620 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002621 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002622 goto done;
2623 }
2624
2625 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002626 for_all_evt_queues(adapter, eqo, i)
2627 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002628
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002629done:
2630 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002631}
2632
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002633static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002634{
2635 struct be_queue_info *q;
2636 struct be_rx_obj *rxo;
2637 int i;
2638
2639 for_all_rx_queues(adapter, rxo, i) {
2640 q = &rxo->q;
2641 if (q->created) {
2642 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002643 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002644 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002645 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002646 }
2647}
2648
Sathya Perla889cd4b2010-05-30 23:33:45 +00002649static int be_close(struct net_device *netdev)
2650{
2651 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002652 struct be_eq_obj *eqo;
2653 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002654
Parav Pandit045508a2012-03-26 14:27:13 +00002655 be_roce_dev_close(adapter);
2656
Ivan Veceradff345c52013-11-27 08:59:32 +01002657 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2658 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00002659 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302660 be_disable_busy_poll(eqo);
2661 }
David S. Miller71237b62013-11-28 18:53:36 -05002662 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00002663 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002664
2665 be_async_mcc_disable(adapter);
2666
2667 /* Wait for all pending tx completions to arrive so that
2668 * all tx skbs are freed.
2669 */
Sathya Perlafba87552013-05-08 02:05:50 +00002670 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302671 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002672
2673 be_rx_qs_destroy(adapter);
2674
Ajit Khaparded11a3472013-11-18 10:44:37 -06002675 for (i = 1; i < (adapter->uc_macs + 1); i++)
2676 be_cmd_pmac_del(adapter, adapter->if_handle,
2677 adapter->pmac_id[i], 0);
2678 adapter->uc_macs = 0;
2679
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002680 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002681 if (msix_enabled(adapter))
2682 synchronize_irq(be_msix_vec_get(adapter, eqo));
2683 else
2684 synchronize_irq(netdev->irq);
2685 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002686 }
2687
Sathya Perla889cd4b2010-05-30 23:33:45 +00002688 be_irq_unregister(adapter);
2689
Sathya Perla482c9e72011-06-29 23:33:17 +00002690 return 0;
2691}
2692
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002693static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002694{
2695 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002696 int rc, i, j;
2697 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002698
2699 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002700 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2701 sizeof(struct be_eth_rx_d));
2702 if (rc)
2703 return rc;
2704 }
2705
2706 /* The FW would like the default RXQ to be created first */
2707 rxo = default_rxo(adapter);
2708 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2709 adapter->if_handle, false, &rxo->rss_id);
2710 if (rc)
2711 return rc;
2712
2713 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002714 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002715 rx_frag_size, adapter->if_handle,
2716 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002717 if (rc)
2718 return rc;
2719 }
2720
2721 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002722 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2723 for_all_rss_queues(adapter, rxo, i) {
2724 if ((j + i) >= 128)
2725 break;
2726 rsstable[j + i] = rxo->rss_id;
2727 }
2728 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002729 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2730 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2731
2732 if (!BEx_chip(adapter))
2733 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2734 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302735 } else {
2736 /* Disable RSS, if only default RX Q is created */
2737 adapter->rss_flags = RSS_ENABLE_NONE;
2738 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002739
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302740 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2741 128);
2742 if (rc) {
2743 adapter->rss_flags = RSS_ENABLE_NONE;
2744 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002745 }
2746
2747 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002748 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002749 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002750 return 0;
2751}
2752
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753static int be_open(struct net_device *netdev)
2754{
2755 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002756 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002757 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002758 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002759 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002760 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002761
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002762 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002763 if (status)
2764 goto err;
2765
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002766 status = be_irq_register(adapter);
2767 if (status)
2768 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002769
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002770 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002771 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002772
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002773 for_all_tx_queues(adapter, txo, i)
2774 be_cq_notify(adapter, txo->cq.id, true, 0);
2775
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002776 be_async_mcc_enable(adapter);
2777
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002778 for_all_evt_queues(adapter, eqo, i) {
2779 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302780 be_enable_busy_poll(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2782 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002783 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002784
Sathya Perla323ff712012-09-28 04:39:43 +00002785 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002786 if (!status)
2787 be_link_status_update(adapter, link_status);
2788
Sathya Perlafba87552013-05-08 02:05:50 +00002789 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002790 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002791 return 0;
2792err:
2793 be_close(adapter->netdev);
2794 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002795}
2796
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002797static int be_setup_wol(struct be_adapter *adapter, bool enable)
2798{
2799 struct be_dma_mem cmd;
2800 int status = 0;
2801 u8 mac[ETH_ALEN];
2802
2803 memset(mac, 0, ETH_ALEN);
2804
2805 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002806 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2807 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002808 if (cmd.va == NULL)
2809 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002810
2811 if (enable) {
2812 status = pci_write_config_dword(adapter->pdev,
2813 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2814 if (status) {
2815 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002816 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002817 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2818 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002819 return status;
2820 }
2821 status = be_cmd_enable_magic_wol(adapter,
2822 adapter->netdev->dev_addr, &cmd);
2823 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2824 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2825 } else {
2826 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2827 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2828 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2829 }
2830
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002831 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002832 return status;
2833}
2834
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002835/*
2836 * Generate a seed MAC address from the PF MAC Address using jhash.
2837 * MAC Address for VFs are assigned incrementally starting from the seed.
2838 * These addresses are programmed in the ASIC by the PF and the VF driver
2839 * queries for the MAC address during its probe.
2840 */
Sathya Perla4c876612013-02-03 20:30:11 +00002841static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002842{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002843 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002844 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002845 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002846 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002847
2848 be_vf_eth_addr_generate(adapter, mac);
2849
Sathya Perla11ac75e2011-12-13 00:58:50 +00002850 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302851 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002852 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002853 vf_cfg->if_handle,
2854 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302855 else
2856 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2857 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002858
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002859 if (status)
2860 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002861 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002862 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002863 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002864
2865 mac[5] += 1;
2866 }
2867 return status;
2868}
2869
Sathya Perla4c876612013-02-03 20:30:11 +00002870static int be_vfs_mac_query(struct be_adapter *adapter)
2871{
2872 int status, vf;
2873 u8 mac[ETH_ALEN];
2874 struct be_vf_cfg *vf_cfg;
Sathya Perla95046b92013-07-23 15:25:02 +05302875 bool active = false;
Sathya Perla4c876612013-02-03 20:30:11 +00002876
2877 for_all_vfs(adapter, vf_cfg, vf) {
2878 be_cmd_get_mac_from_list(adapter, mac, &active,
2879 &vf_cfg->pmac_id, 0);
2880
2881 status = be_cmd_mac_addr_query(adapter, mac, false,
2882 vf_cfg->if_handle, 0);
2883 if (status)
2884 return status;
2885 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2886 }
2887 return 0;
2888}
2889
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002890static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002891{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002892 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002893 u32 vf;
2894
Sathya Perla257a3fe2013-06-14 15:54:51 +05302895 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002896 dev_warn(&adapter->pdev->dev,
2897 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002898 goto done;
2899 }
2900
Sathya Perlab4c1df92013-05-08 02:05:47 +00002901 pci_disable_sriov(adapter->pdev);
2902
Sathya Perla11ac75e2011-12-13 00:58:50 +00002903 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302904 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002905 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2906 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302907 else
2908 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2909 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002910
Sathya Perla11ac75e2011-12-13 00:58:50 +00002911 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2912 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002913done:
2914 kfree(adapter->vf_cfg);
2915 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002916}
2917
Sathya Perla77071332013-08-27 16:57:34 +05302918static void be_clear_queues(struct be_adapter *adapter)
2919{
2920 be_mcc_queues_destroy(adapter);
2921 be_rx_cqs_destroy(adapter);
2922 be_tx_queues_destroy(adapter);
2923 be_evt_queues_destroy(adapter);
2924}
2925
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302926static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002927{
Sathya Perla191eb752012-02-23 18:50:13 +00002928 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2929 cancel_delayed_work_sync(&adapter->work);
2930 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2931 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302932}
2933
Somnath Koturb05004a2013-12-05 12:08:16 +05302934static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302935{
2936 int i;
2937
Somnath Koturb05004a2013-12-05 12:08:16 +05302938 if (adapter->pmac_id) {
2939 for (i = 0; i < (adapter->uc_macs + 1); i++)
2940 be_cmd_pmac_del(adapter, adapter->if_handle,
2941 adapter->pmac_id[i], 0);
2942 adapter->uc_macs = 0;
2943
2944 kfree(adapter->pmac_id);
2945 adapter->pmac_id = NULL;
2946 }
2947}
2948
/* Tear down driver state: stop the worker, clear VFs, remove MAC filters,
 * destroy the PF interface and all queues, and release MSI-X. The order
 * matters: the worker must stop before the objects it touches go away.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2966
Sathya Perla4c876612013-02-03 20:30:11 +00002967static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002968{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302969 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002970 struct be_vf_cfg *vf_cfg;
2971 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002972 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002973
Sathya Perla4c876612013-02-03 20:30:11 +00002974 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2975 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002976
Sathya Perla4c876612013-02-03 20:30:11 +00002977 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302978 if (!BE3_chip(adapter)) {
2979 status = be_cmd_get_profile_config(adapter, &res,
2980 vf + 1);
2981 if (!status)
2982 cap_flags = res.if_cap_flags;
2983 }
Sathya Perla4c876612013-02-03 20:30:11 +00002984
2985 /* If a FW profile exists, then cap_flags are updated */
2986 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2987 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2988 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2989 &vf_cfg->if_handle, vf + 1);
2990 if (status)
2991 goto err;
2992 }
2993err:
2994 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002995}
2996
Sathya Perla39f1d942012-05-08 19:41:24 +00002997static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002998{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002999 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003000 int vf;
3001
Sathya Perla39f1d942012-05-08 19:41:24 +00003002 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3003 GFP_KERNEL);
3004 if (!adapter->vf_cfg)
3005 return -ENOMEM;
3006
Sathya Perla11ac75e2011-12-13 00:58:50 +00003007 for_all_vfs(adapter, vf_cfg, vf) {
3008 vf_cfg->if_handle = -1;
3009 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003010 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003011 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003012}
3013
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003014static int be_vf_setup(struct be_adapter *adapter)
3015{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003016 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003017 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00003018 int status, old_vfs, vf;
3019 struct device *dev = &adapter->pdev->dev;
Sathya Perla04a06022013-07-23 15:25:00 +05303020 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003021
Sathya Perla257a3fe2013-06-14 15:54:51 +05303022 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00003023 if (old_vfs) {
3024 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3025 if (old_vfs != num_vfs)
3026 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3027 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00003028 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303029 if (num_vfs > be_max_vfs(adapter))
Sathya Perla4c876612013-02-03 20:30:11 +00003030 dev_info(dev, "Device supports %d VFs and not %d\n",
Sathya Perla92bf14a2013-08-27 16:57:32 +05303031 be_max_vfs(adapter), num_vfs);
3032 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
Sathya Perlab4c1df92013-05-08 02:05:47 +00003033 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00003034 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003035 }
3036
3037 status = be_vf_setup_init(adapter);
3038 if (status)
3039 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003040
Sathya Perla4c876612013-02-03 20:30:11 +00003041 if (old_vfs) {
3042 for_all_vfs(adapter, vf_cfg, vf) {
3043 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3044 if (status)
3045 goto err;
3046 }
3047 } else {
3048 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003049 if (status)
3050 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003051 }
3052
Sathya Perla4c876612013-02-03 20:30:11 +00003053 if (old_vfs) {
3054 status = be_vfs_mac_query(adapter);
3055 if (status)
3056 goto err;
3057 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00003058 status = be_vf_eth_addr_config(adapter);
3059 if (status)
3060 goto err;
3061 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003062
Sathya Perla11ac75e2011-12-13 00:58:50 +00003063 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303064 /* Allow VFs to programs MAC/VLAN filters */
3065 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3066 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3067 status = be_cmd_set_fn_privileges(adapter,
3068 privileges |
3069 BE_PRIV_FILTMGMT,
3070 vf + 1);
3071 if (!status)
3072 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3073 vf);
3074 }
3075
Sathya Perla4c876612013-02-03 20:30:11 +00003076 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3077 * Allow full available bandwidth
3078 */
3079 if (BE3_chip(adapter) && !old_vfs)
3080 be_cmd_set_qos(adapter, 1000, vf+1);
3081
3082 status = be_cmd_link_status_query(adapter, &lnk_speed,
3083 NULL, vf + 1);
3084 if (!status)
3085 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003086
3087 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003088 vf + 1, vf_cfg->if_handle, NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003089 if (status)
3090 goto err;
3091 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00003092
Vasundhara Volam05998632013-10-01 15:59:59 +05303093 if (!old_vfs)
3094 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003095 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003096
3097 if (!old_vfs) {
3098 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3099 if (status) {
3100 dev_err(dev, "SRIOV enable failed\n");
3101 adapter->num_vfs = 0;
3102 goto err;
3103 }
3104 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003105 return 0;
3106err:
Sathya Perla4c876612013-02-03 20:30:11 +00003107 dev_err(dev, "VF setup failed\n");
3108 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003109 return status;
3110}
3111
Sathya Perla92bf14a2013-08-27 16:57:32 +05303112/* On BE2/BE3 FW does not suggest the supported limits */
3113static void BEx_get_resources(struct be_adapter *adapter,
3114 struct be_resources *res)
3115{
3116 struct pci_dev *pdev = adapter->pdev;
3117 bool use_sriov = false;
Suresh Reddye3dc8672014-01-06 13:02:25 +05303118 int max_vfs;
3119
3120 max_vfs = pci_sriov_get_totalvfs(pdev);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303121
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303122 if (BE3_chip(adapter) && sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303123 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303124 use_sriov = res->max_vfs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303125 }
3126
3127 if (be_physfn(adapter))
3128 res->max_uc_mac = BE_UC_PMAC_COUNT;
3129 else
3130 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3131
3132 if (adapter->function_mode & FLEX10_MODE)
3133 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde1aa96732013-09-27 15:18:16 -05003134 else if (adapter->function_mode & UMC_ENABLED)
3135 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303136 else
3137 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3138 res->max_mcast_mac = BE_MAX_MC;
3139
Vasundhara Volam30f3fe42013-10-01 15:59:58 +05303140 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303141 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
Vasundhara Volam30f3fe42013-10-01 15:59:58 +05303142 !be_physfn(adapter) || (adapter->port_num > 1))
Sathya Perla92bf14a2013-08-27 16:57:32 +05303143 res->max_tx_qs = 1;
3144 else
3145 res->max_tx_qs = BE3_MAX_TX_QS;
3146
3147 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3148 !use_sriov && be_physfn(adapter))
3149 res->max_rss_qs = (adapter->be3_native) ?
3150 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3151 res->max_rx_qs = res->max_rss_qs + 1;
3152
Suresh Reddye3dc8672014-01-06 13:02:25 +05303153 if (be_physfn(adapter))
3154 res->max_evt_qs = (max_vfs > 0) ?
3155 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3156 else
3157 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303158
3159 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3160 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3161 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3162}
3163
Sathya Perla30128032011-11-10 19:17:57 +00003164static void be_setup_init(struct be_adapter *adapter)
3165{
3166 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003167 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003168 adapter->if_handle = -1;
3169 adapter->be3_native = false;
3170 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003171 if (be_physfn(adapter))
3172 adapter->cmd_privileges = MAX_PRIVILEGES;
3173 else
3174 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003175}
3176
Sathya Perla92bf14a2013-08-27 16:57:32 +05303177static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003178{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303179 struct device *dev = &adapter->pdev->dev;
3180 struct be_resources res = {0};
3181 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003182
Sathya Perla92bf14a2013-08-27 16:57:32 +05303183 if (BEx_chip(adapter)) {
3184 BEx_get_resources(adapter, &res);
3185 adapter->res = res;
3186 }
3187
Sathya Perla92bf14a2013-08-27 16:57:32 +05303188 /* For Lancer, SH etc read per-function resource limits from FW.
3189 * GET_FUNC_CONFIG returns per function guaranteed limits.
3190 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3191 */
Sathya Perla4c876612013-02-03 20:30:11 +00003192 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303193 status = be_cmd_get_func_config(adapter, &res);
3194 if (status)
3195 return status;
3196
3197 /* If RoCE may be enabled stash away half the EQs for RoCE */
3198 if (be_roce_supported(adapter))
3199 res.max_evt_qs /= 2;
3200 adapter->res = res;
3201
3202 if (be_physfn(adapter)) {
3203 status = be_cmd_get_profile_config(adapter, &res, 0);
3204 if (status)
3205 return status;
3206 adapter->res.max_vfs = res.max_vfs;
3207 }
3208
3209 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3210 be_max_txqs(adapter), be_max_rxqs(adapter),
3211 be_max_rss(adapter), be_max_eqs(adapter),
3212 be_max_vfs(adapter));
3213 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3214 be_max_uc(adapter), be_max_mc(adapter),
3215 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003216 }
3217
Sathya Perla92bf14a2013-08-27 16:57:32 +05303218 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003219}
3220
Sathya Perla39f1d942012-05-08 19:41:24 +00003221/* Routine to query per function resource limits */
3222static int be_get_config(struct be_adapter *adapter)
3223{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303224 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003225 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003226
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003227 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3228 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003229 &adapter->function_caps,
3230 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003231 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303232 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003233
Vasundhara Volam542963b2014-01-15 13:23:33 +05303234 if (be_physfn(adapter)) {
3235 status = be_cmd_get_active_profile(adapter, &profile_id);
3236 if (!status)
3237 dev_info(&adapter->pdev->dev,
3238 "Using profile 0x%x\n", profile_id);
3239 }
3240
Sathya Perla92bf14a2013-08-27 16:57:32 +05303241 status = be_get_resources(adapter);
3242 if (status)
3243 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003244
3245 /* primary mac needs 1 pmac entry */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303246 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3247 GFP_KERNEL);
3248 if (!adapter->pmac_id)
3249 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003250
Sathya Perla92bf14a2013-08-27 16:57:32 +05303251 /* Sanitize cfg_num_qs based on HW and platform limits */
3252 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3253
3254 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003255}
3256
Sathya Perla95046b92013-07-23 15:25:02 +05303257static int be_mac_setup(struct be_adapter *adapter)
3258{
3259 u8 mac[ETH_ALEN];
3260 int status;
3261
3262 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3263 status = be_cmd_get_perm_mac(adapter, mac);
3264 if (status)
3265 return status;
3266
3267 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3268 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3269 } else {
3270 /* Maybe the HW was reset; dev_addr must be re-programmed */
3271 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3272 }
3273
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003274 /* For BE3-R VFs, the PF programs the initial MAC address */
3275 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3276 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3277 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303278 return 0;
3279}
3280
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303281static void be_schedule_worker(struct be_adapter *adapter)
3282{
3283 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3284 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3285}
3286
Sathya Perla77071332013-08-27 16:57:34 +05303287static int be_setup_queues(struct be_adapter *adapter)
3288{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303289 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303290 int status;
3291
3292 status = be_evt_queues_create(adapter);
3293 if (status)
3294 goto err;
3295
3296 status = be_tx_qs_create(adapter);
3297 if (status)
3298 goto err;
3299
3300 status = be_rx_cqs_create(adapter);
3301 if (status)
3302 goto err;
3303
3304 status = be_mcc_queues_create(adapter);
3305 if (status)
3306 goto err;
3307
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303308 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3309 if (status)
3310 goto err;
3311
3312 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3313 if (status)
3314 goto err;
3315
Sathya Perla77071332013-08-27 16:57:34 +05303316 return 0;
3317err:
3318 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3319 return status;
3320}
3321
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303322int be_update_queues(struct be_adapter *adapter)
3323{
3324 struct net_device *netdev = adapter->netdev;
3325 int status;
3326
3327 if (netif_running(netdev))
3328 be_close(netdev);
3329
3330 be_cancel_worker(adapter);
3331
3332 /* If any vectors have been shared with RoCE we cannot re-program
3333 * the MSIx table.
3334 */
3335 if (!adapter->num_msix_roce_vec)
3336 be_msix_disable(adapter);
3337
3338 be_clear_queues(adapter);
3339
3340 if (!msix_enabled(adapter)) {
3341 status = be_msix_enable(adapter);
3342 if (status)
3343 return status;
3344 }
3345
3346 status = be_setup_queues(adapter);
3347 if (status)
3348 return status;
3349
3350 be_schedule_worker(adapter);
3351
3352 if (netif_running(netdev))
3353 status = be_open(netdev);
3354
3355 return status;
3356}
3357
Sathya Perla5fb379e2009-06-18 00:02:59 +00003358static int be_setup(struct be_adapter *adapter)
3359{
Sathya Perla39f1d942012-05-08 19:41:24 +00003360 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303361 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003362 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003363
Sathya Perla30128032011-11-10 19:17:57 +00003364 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003365
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003366 if (!lancer_chip(adapter))
3367 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003368
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003369 status = be_get_config(adapter);
3370 if (status)
3371 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003372
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003373 status = be_msix_enable(adapter);
3374 if (status)
3375 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003376
Sathya Perla77071332013-08-27 16:57:34 +05303377 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3378 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3379 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3380 en_flags |= BE_IF_FLAGS_RSS;
3381 en_flags = en_flags & be_if_cap_flags(adapter);
3382 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3383 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003384 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003385 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003386
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303387 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3388 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303389 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303390 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003391 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003392 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003393
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003394 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003395
Sathya Perla95046b92013-07-23 15:25:02 +05303396 status = be_mac_setup(adapter);
3397 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003398 goto err;
3399
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003400 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003401
Somnath Koture9e2a902013-10-24 14:37:53 +05303402 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3403 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3404 adapter->fw_ver);
3405 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3406 }
3407
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003408 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003409 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003410
3411 be_set_rx_mode(adapter->netdev);
3412
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003413 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003414
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003415 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3416 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003417 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003418
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303419 if (sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303420 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003421 be_vf_setup(adapter);
3422 else
3423 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003424 }
3425
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003426 status = be_cmd_get_phy_info(adapter);
3427 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003428 adapter->phy.fc_autoneg = 1;
3429
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303430 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003431 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003432err:
3433 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003434 return status;
3435}
3436
Ivan Vecera66268732011-12-08 01:31:21 +00003437#ifdef CONFIG_NET_POLL_CONTROLLER
3438static void be_netpoll(struct net_device *netdev)
3439{
3440 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003441 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003442 int i;
3443
Sathya Perlae49cc342012-11-27 19:50:02 +00003444 for_all_evt_queues(adapter, eqo, i) {
3445 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3446 napi_schedule(&eqo->napi);
3447 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003448
3449 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003450}
3451#endif
3452
Ajit Khaparde84517482009-09-04 03:12:16 +00003453#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003454static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003455
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003456static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003457 const u8 *p, u32 img_start, int image_size,
3458 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003459{
3460 u32 crc_offset;
3461 u8 flashed_crc[4];
3462 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003463
3464 crc_offset = hdr_size + img_start + image_size - 4;
3465
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003466 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003467
3468 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003469 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003470 if (status) {
3471 dev_err(&adapter->pdev->dev,
3472 "could not get crc from flash, not flashing redboot\n");
3473 return false;
3474 }
3475
3476 /*update redboot only if crc does not match*/
3477 if (!memcmp(flashed_crc, p, 4))
3478 return false;
3479 else
3480 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003481}
3482
Sathya Perla306f1342011-08-02 19:57:45 +00003483static bool phy_flashing_required(struct be_adapter *adapter)
3484{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003485 return (adapter->phy.phy_type == TN_8022 &&
3486 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003487}
3488
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003489static bool is_comp_in_ufi(struct be_adapter *adapter,
3490 struct flash_section_info *fsec, int type)
3491{
3492 int i = 0, img_type = 0;
3493 struct flash_section_info_g2 *fsec_g2 = NULL;
3494
Sathya Perlaca34fe32012-11-06 17:48:56 +00003495 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003496 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3497
3498 for (i = 0; i < MAX_FLASH_COMP; i++) {
3499 if (fsec_g2)
3500 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3501 else
3502 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3503
3504 if (img_type == type)
3505 return true;
3506 }
3507 return false;
3508
3509}
3510
Jingoo Han4188e7d2013-08-05 18:02:02 +09003511static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003512 int header_size,
3513 const struct firmware *fw)
3514{
3515 struct flash_section_info *fsec = NULL;
3516 const u8 *p = fw->data;
3517
3518 p += header_size;
3519 while (p < (fw->data + fw->size)) {
3520 fsec = (struct flash_section_info *)p;
3521 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3522 return fsec;
3523 p += 32;
3524 }
3525 return NULL;
3526}
3527
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003528static int be_flash(struct be_adapter *adapter, const u8 *img,
3529 struct be_dma_mem *flash_cmd, int optype, int img_size)
3530{
3531 u32 total_bytes = 0, flash_op, num_bytes = 0;
3532 int status = 0;
3533 struct be_cmd_write_flashrom *req = flash_cmd->va;
3534
3535 total_bytes = img_size;
3536 while (total_bytes) {
3537 num_bytes = min_t(u32, 32*1024, total_bytes);
3538
3539 total_bytes -= num_bytes;
3540
3541 if (!total_bytes) {
3542 if (optype == OPTYPE_PHY_FW)
3543 flash_op = FLASHROM_OPER_PHY_FLASH;
3544 else
3545 flash_op = FLASHROM_OPER_FLASH;
3546 } else {
3547 if (optype == OPTYPE_PHY_FW)
3548 flash_op = FLASHROM_OPER_PHY_SAVE;
3549 else
3550 flash_op = FLASHROM_OPER_SAVE;
3551 }
3552
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003553 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003554 img += num_bytes;
3555 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3556 flash_op, num_bytes);
3557 if (status) {
3558 if (status == ILLEGAL_IOCTL_REQ &&
3559 optype == OPTYPE_PHY_FW)
3560 break;
3561 dev_err(&adapter->pdev->dev,
3562 "cmd to write to flash rom failed.\n");
3563 return status;
3564 }
3565 }
3566 return 0;
3567}
3568
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003569/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003570static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003571 const struct firmware *fw,
3572 struct be_dma_mem *flash_cmd,
3573 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003574
Ajit Khaparde84517482009-09-04 03:12:16 +00003575{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003576 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003577 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003578 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003579 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003580 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003581 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003582
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003583 struct flash_comp gen3_flash_types[] = {
3584 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3585 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3586 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3587 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3588 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3589 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3590 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3591 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3592 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3593 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3594 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3595 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3596 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3597 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3598 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3599 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3600 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3601 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3602 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3603 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003604 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003605
3606 struct flash_comp gen2_flash_types[] = {
3607 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3608 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3609 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3610 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3611 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3612 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3613 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3614 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3615 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3616 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3617 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3618 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3619 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3620 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3621 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3622 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003623 };
3624
Sathya Perlaca34fe32012-11-06 17:48:56 +00003625 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003626 pflashcomp = gen3_flash_types;
3627 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003628 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003629 } else {
3630 pflashcomp = gen2_flash_types;
3631 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003632 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003633 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003634
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003635 /* Get flash section info*/
3636 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3637 if (!fsec) {
3638 dev_err(&adapter->pdev->dev,
3639 "Invalid Cookie. UFI corrupted ?\n");
3640 return -1;
3641 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003642 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003643 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003644 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003645
3646 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3647 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3648 continue;
3649
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003650 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3651 !phy_flashing_required(adapter))
3652 continue;
3653
3654 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3655 redboot = be_flash_redboot(adapter, fw->data,
3656 pflashcomp[i].offset, pflashcomp[i].size,
3657 filehdr_size + img_hdrs_size);
3658 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003659 continue;
3660 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003661
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003662 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003663 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003664 if (p + pflashcomp[i].size > fw->data + fw->size)
3665 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003666
3667 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3668 pflashcomp[i].size);
3669 if (status) {
3670 dev_err(&adapter->pdev->dev,
3671 "Flashing section type %d failed.\n",
3672 pflashcomp[i].img_type);
3673 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003674 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003675 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003676 return 0;
3677}
3678
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003679static int be_flash_skyhawk(struct be_adapter *adapter,
3680 const struct firmware *fw,
3681 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003682{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003683 int status = 0, i, filehdr_size = 0;
3684 int img_offset, img_size, img_optype, redboot;
3685 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3686 const u8 *p = fw->data;
3687 struct flash_section_info *fsec = NULL;
3688
3689 filehdr_size = sizeof(struct flash_file_hdr_g3);
3690 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3691 if (!fsec) {
3692 dev_err(&adapter->pdev->dev,
3693 "Invalid Cookie. UFI corrupted ?\n");
3694 return -1;
3695 }
3696
3697 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3698 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3699 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3700
3701 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3702 case IMAGE_FIRMWARE_iSCSI:
3703 img_optype = OPTYPE_ISCSI_ACTIVE;
3704 break;
3705 case IMAGE_BOOT_CODE:
3706 img_optype = OPTYPE_REDBOOT;
3707 break;
3708 case IMAGE_OPTION_ROM_ISCSI:
3709 img_optype = OPTYPE_BIOS;
3710 break;
3711 case IMAGE_OPTION_ROM_PXE:
3712 img_optype = OPTYPE_PXE_BIOS;
3713 break;
3714 case IMAGE_OPTION_ROM_FCoE:
3715 img_optype = OPTYPE_FCOE_BIOS;
3716 break;
3717 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3718 img_optype = OPTYPE_ISCSI_BACKUP;
3719 break;
3720 case IMAGE_NCSI:
3721 img_optype = OPTYPE_NCSI_FW;
3722 break;
3723 default:
3724 continue;
3725 }
3726
3727 if (img_optype == OPTYPE_REDBOOT) {
3728 redboot = be_flash_redboot(adapter, fw->data,
3729 img_offset, img_size,
3730 filehdr_size + img_hdrs_size);
3731 if (!redboot)
3732 continue;
3733 }
3734
3735 p = fw->data;
3736 p += filehdr_size + img_offset + img_hdrs_size;
3737 if (p + img_size > fw->data + fw->size)
3738 return -1;
3739
3740 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3741 if (status) {
3742 dev_err(&adapter->pdev->dev,
3743 "Flashing section type %d failed.\n",
3744 fsec->fsec_entry[i].type);
3745 return status;
3746 }
3747 }
3748 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003749}
3750
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003751static int lancer_fw_download(struct be_adapter *adapter,
3752 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003753{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003754#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3755#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3756 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003757 const u8 *data_ptr = NULL;
3758 u8 *dest_image_ptr = NULL;
3759 size_t image_size = 0;
3760 u32 chunk_size = 0;
3761 u32 data_written = 0;
3762 u32 offset = 0;
3763 int status = 0;
3764 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003765 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003766
3767 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3768 dev_err(&adapter->pdev->dev,
3769 "FW Image not properly aligned. "
3770 "Length must be 4 byte aligned.\n");
3771 status = -EINVAL;
3772 goto lancer_fw_exit;
3773 }
3774
3775 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3776 + LANCER_FW_DOWNLOAD_CHUNK;
3777 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003778 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003779 if (!flash_cmd.va) {
3780 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003781 goto lancer_fw_exit;
3782 }
3783
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003784 dest_image_ptr = flash_cmd.va +
3785 sizeof(struct lancer_cmd_req_write_object);
3786 image_size = fw->size;
3787 data_ptr = fw->data;
3788
3789 while (image_size) {
3790 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3791
3792 /* Copy the image chunk content. */
3793 memcpy(dest_image_ptr, data_ptr, chunk_size);
3794
3795 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003796 chunk_size, offset,
3797 LANCER_FW_DOWNLOAD_LOCATION,
3798 &data_written, &change_status,
3799 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003800 if (status)
3801 break;
3802
3803 offset += data_written;
3804 data_ptr += data_written;
3805 image_size -= data_written;
3806 }
3807
3808 if (!status) {
3809 /* Commit the FW written */
3810 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003811 0, offset,
3812 LANCER_FW_DOWNLOAD_LOCATION,
3813 &data_written, &change_status,
3814 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003815 }
3816
3817 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3818 flash_cmd.dma);
3819 if (status) {
3820 dev_err(&adapter->pdev->dev,
3821 "Firmware load error. "
3822 "Status code: 0x%x Additional Status: 0x%x\n",
3823 status, add_status);
3824 goto lancer_fw_exit;
3825 }
3826
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003827 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur4bebb562013-12-05 12:07:55 +05303828 dev_info(&adapter->pdev->dev,
3829 "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00003830 status = lancer_physdev_ctrl(adapter,
3831 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003832 if (status) {
3833 dev_err(&adapter->pdev->dev,
3834 "Adapter busy for FW reset.\n"
3835 "New FW will not be active.\n");
3836 goto lancer_fw_exit;
3837 }
3838 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3839 dev_err(&adapter->pdev->dev,
3840 "System reboot required for new FW"
3841 " to be active\n");
3842 }
3843
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003844 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3845lancer_fw_exit:
3846 return status;
3847}
3848
Sathya Perlaca34fe32012-11-06 17:48:56 +00003849#define UFI_TYPE2 2
3850#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003851#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003852#define UFI_TYPE4 4
3853static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003854 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003855{
3856 if (fhdr == NULL)
3857 goto be_get_ufi_exit;
3858
Sathya Perlaca34fe32012-11-06 17:48:56 +00003859 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3860 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003861 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3862 if (fhdr->asic_type_rev == 0x10)
3863 return UFI_TYPE3R;
3864 else
3865 return UFI_TYPE3;
3866 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003867 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003868
3869be_get_ufi_exit:
3870 dev_err(&adapter->pdev->dev,
3871 "UFI and Interface are not compatible for flashing\n");
3872 return -1;
3873}
3874
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003875static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3876{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003877 struct flash_file_hdr_g3 *fhdr3;
3878 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003879 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003880 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003881 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003882
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003883 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003884 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3885 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003886 if (!flash_cmd.va) {
3887 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003888 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003889 }
3890
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003891 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003892 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003893
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003894 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003895
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003896 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3897 for (i = 0; i < num_imgs; i++) {
3898 img_hdr_ptr = (struct image_hdr *)(fw->data +
3899 (sizeof(struct flash_file_hdr_g3) +
3900 i * sizeof(struct image_hdr)));
3901 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003902 switch (ufi_type) {
3903 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003904 status = be_flash_skyhawk(adapter, fw,
3905 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003906 break;
3907 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003908 status = be_flash_BEx(adapter, fw, &flash_cmd,
3909 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003910 break;
3911 case UFI_TYPE3:
3912 /* Do not flash this ufi on BE3-R cards */
3913 if (adapter->asic_rev < 0x10)
3914 status = be_flash_BEx(adapter, fw,
3915 &flash_cmd,
3916 num_imgs);
3917 else {
3918 status = -1;
3919 dev_err(&adapter->pdev->dev,
3920 "Can't load BE3 UFI on BE3R\n");
3921 }
3922 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003923 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003924 }
3925
Sathya Perlaca34fe32012-11-06 17:48:56 +00003926 if (ufi_type == UFI_TYPE2)
3927 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003928 else if (ufi_type == -1)
3929 status = -1;
3930
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003931 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3932 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003933 if (status) {
3934 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003935 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003936 }
3937
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003938 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003939
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003940be_fw_exit:
3941 return status;
3942}
3943
3944int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3945{
3946 const struct firmware *fw;
3947 int status;
3948
3949 if (!netif_running(adapter->netdev)) {
3950 dev_err(&adapter->pdev->dev,
3951 "Firmware load not allowed (interface is down)\n");
3952 return -1;
3953 }
3954
3955 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3956 if (status)
3957 goto fw_exit;
3958
3959 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3960
3961 if (lancer_chip(adapter))
3962 status = lancer_fw_download(adapter, fw);
3963 else
3964 status = be_fw_download(adapter, fw);
3965
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003966 if (!status)
3967 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3968 adapter->fw_on_flash);
3969
Ajit Khaparde84517482009-09-04 03:12:16 +00003970fw_exit:
3971 release_firmware(fw);
3972 return status;
3973}
3974
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003975static int be_ndo_bridge_setlink(struct net_device *dev,
3976 struct nlmsghdr *nlh)
3977{
3978 struct be_adapter *adapter = netdev_priv(dev);
3979 struct nlattr *attr, *br_spec;
3980 int rem;
3981 int status = 0;
3982 u16 mode = 0;
3983
3984 if (!sriov_enabled(adapter))
3985 return -EOPNOTSUPP;
3986
3987 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3988
3989 nla_for_each_nested(attr, br_spec, rem) {
3990 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3991 continue;
3992
3993 mode = nla_get_u16(attr);
3994 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3995 return -EINVAL;
3996
3997 status = be_cmd_set_hsw_config(adapter, 0, 0,
3998 adapter->if_handle,
3999 mode == BRIDGE_MODE_VEPA ?
4000 PORT_FWD_TYPE_VEPA :
4001 PORT_FWD_TYPE_VEB);
4002 if (status)
4003 goto err;
4004
4005 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4006 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4007
4008 return status;
4009 }
4010err:
4011 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4012 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4013
4014 return status;
4015}
4016
4017static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4018 struct net_device *dev,
4019 u32 filter_mask)
4020{
4021 struct be_adapter *adapter = netdev_priv(dev);
4022 int status = 0;
4023 u8 hsw_mode;
4024
4025 if (!sriov_enabled(adapter))
4026 return 0;
4027
4028 /* BE and Lancer chips support VEB mode only */
4029 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4030 hsw_mode = PORT_FWD_TYPE_VEB;
4031 } else {
4032 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4033 adapter->if_handle, &hsw_mode);
4034 if (status)
4035 return 0;
4036 }
4037
4038 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4039 hsw_mode == PORT_FWD_TYPE_VEPA ?
4040 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4041}
4042
stephen hemmingere5686ad2012-01-05 19:10:25 +00004043static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004044 .ndo_open = be_open,
4045 .ndo_stop = be_close,
4046 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004047 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004048 .ndo_set_mac_address = be_mac_addr_set,
4049 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004050 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004051 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004052 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4053 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004054 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004055 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00004056 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004057 .ndo_get_vf_config = be_get_vf_config,
4058#ifdef CONFIG_NET_POLL_CONTROLLER
4059 .ndo_poll_controller = be_netpoll,
4060#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004061 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4062 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304063#ifdef CONFIG_NET_RX_BUSY_POLL
4064 .ndo_busy_poll = be_busy_poll
4065#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004066};
4067
/* One-time netdev setup: advertise offload features and attach the
 * netdev/ethtool ops.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything togglable starts enabled; the VLAN RX offloads are
	 * enabled but not listed in hw_features (i.e. not togglable)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so segment payload plus Ethernet header fits in 64K */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4094
4095static void be_unmap_pci_bars(struct be_adapter *adapter)
4096{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004097 if (adapter->csr)
4098 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004099 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004100 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004101}
4102
/* BAR holding the doorbell registers: BAR 0 on Lancer chips and on
 * virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4110
4111static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004112{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004113 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004114 adapter->roce_db.size = 4096;
4115 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4116 db_bar(adapter));
4117 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4118 db_bar(adapter));
4119 }
Parav Pandit045508a2012-03-26 14:27:13 +00004120 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004121}
4122
/* Map the PCI BARs the driver uses: the CSR BAR (BAR 2, BEx PF only)
 * and the doorbell BAR; also record the RoCE doorbell region on Skyhawk.
 * Returns 0 on success or -ENOMEM if a mapping fails.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* also unwinds the CSR mapping made above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4145
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004146static void be_ctrl_cleanup(struct be_adapter *adapter)
4147{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004148 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004149
4150 be_unmap_pci_bars(adapter);
4151
4152 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004153 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4154 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004155
Sathya Perla5b8821b2011-08-02 19:57:44 +00004156 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004157 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004158 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4159 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004160}
4161
/* Set up everything needed to talk to the controller: identify the SLI
 * family, map BARs, allocate the FW mailbox and rx-filter DMA buffers,
 * and initialize the locks serializing mailbox/MCC access.
 * Returns 0 on success or a negative errno; cleans up after itself on
 * failure.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Read SLI family and PF/VF identity from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox used by the FW can be
	 * 16-byte aligned within the allocation
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space so it can be restored after an EEH reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4220
4221static void be_stats_cleanup(struct be_adapter *adapter)
4222{
Sathya Perla3abcded2010-10-03 22:12:27 -07004223 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004224
4225 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004226 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4227 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004228}
4229
4230static int be_stats_init(struct be_adapter *adapter)
4231{
Sathya Perla3abcded2010-10-03 22:12:27 -07004232 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004233
Sathya Perlaca34fe32012-11-06 17:48:56 +00004234 if (lancer_chip(adapter))
4235 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4236 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004237 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004238 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004239 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004240 else
4241 /* ALL non-BE ASICs */
4242 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004243
Joe Perchesede23fa82013-08-26 22:45:23 -07004244 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4245 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004246 if (cmd->va == NULL)
4247 return -1;
4248 return 0;
4249}
4250
/* PCI ->remove: tear down one adapter function, reversing be_probe() */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* Stop interrupt delivery to other ULPs on this function */
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4281
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004282bool be_is_wol_supported(struct be_adapter *adapter)
4283{
4284 return ((adapter->wol_cap & BE_WOL_CAP) &&
4285 !be_is_wol_excluded(adapter)) ? true : false;
4286}
4287
/* Query one-time configuration from the FW: controller attributes, WOL
 * capability, FW log level (BEx only); also set the die-temperature
 * polling interval and the default RSS queue count.
 * Returns 0 on success or a negative status from the first mandatory
 * FW command that fails.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4319
/* Recover a Lancer adapter after an error: wait for the chip to signal
 * readiness, then rebuild the whole SW state (close, clear, setup and
 * re-open the interface if it was running).
 * Returns 0 on successful recovery; -EAGAIN means FW resources are not
 * yet provisioned and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4356
/* Periodic (1s) work item that polls for adapter errors and, on Lancer
 * chips, drives the recovery sequence. Reschedules itself unless
 * recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the device
		 * while it is being recovered
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4383
/* Periodic (1s) housekeeping: reap MCC completions, refresh FW stats,
 * poll die temperature, replenish starved RX queues and adapt EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll temperature every be_get_temp_freq iterations, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4426
Sathya Perla257a3fe2013-06-14 15:54:51 +05304427/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004428static bool be_reset_required(struct be_adapter *adapter)
4429{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304430 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004431}
4432
Sathya Perlad3791422012-09-28 04:39:44 +00004433static char *mc_name(struct be_adapter *adapter)
4434{
4435 if (adapter->function_mode & FLEX10_MODE)
4436 return "FLEX10";
4437 else if (adapter->function_mode & VNIC_MODE)
4438 return "vNIC";
4439 else if (adapter->function_mode & UMC_ENABLED)
4440 return "UMC";
4441 else
4442 return "";
4443}
4444
/* "PF" for a physical function, "VF" for a virtual one */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4449
/* PCI ->probe: bring up one adapter function — enable the device, map
 * BARs, sync with the FW, allocate driver resources and register the
 * netdev. On failure, unwinds via the layered goto labels at the end.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer a 64-bit DMA mask; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4571
/* PM suspend: arm WOL if requested, quiesce the interface, tear down
 * resources and put the PCI device into the requested power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4596
/* PM resume: re-enable the device, restore PCI state, wait for the FW,
 * rebuild the SW state and restart the interface; disarm WOL if it was
 * armed at suspend time.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return status is ignored on
	 * this path — confirm whether failures here should abort resume
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4638
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Stop the periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4658
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * (once) and tell the EEH core whether to attempt a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* This callback can fire more than once; only quiesce once */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4697
/* EEH callback: the slot has been reset. Re-enable the device, restore
 * its config space and wait for the FW before declaring recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4724
/* EEH callback: traffic may resume. Reset and re-initialize the
 * function, restart the interface and the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4761
/* PCI EEH (AER) recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4767
/* PCI driver descriptor binding be_dev_ids to the callbacks above */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4778
4779static int __init be_init_module(void)
4780{
Joe Perches8e95a202009-12-03 07:58:21 +00004781 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4782 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004783 printk(KERN_WARNING DRV_NAME
4784 " : Module param rx_frag_size must be 2048/4096/8192."
4785 " Using 2048\n");
4786 rx_frag_size = 2048;
4787 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004788
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004789 return pci_register_driver(&be_driver);
4790}
4791module_init(be_init_module);
4792
/* Module unload: deregister the driver; per-device teardown happens
 * through the ->remove callback.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);