blob: 45662af28928619090eee250731c30a032cdd202 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla11ac75e2011-12-13 00:58:50 +000037static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Sathya Perla6b7c5b92009-03-11 23:32:03 -070041static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the Unrecoverable Error status-low
 * register, indexed by bit position when decoding the CSR for logging.
 * Trailing spaces in some entries are intentional (matches FW spec naming).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable names for each bit of the Unrecoverable Error status-high
 * register, indexed by bit position; unassigned bits report as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000128 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 mem->va = NULL;
132 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 return 0;
149}
150
Somnath Kotur68c45a22013-03-14 02:42:07 +0000151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168}
169
Somnath Kotur68c45a22013-03-14 02:42:07 +0000170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_RQ_RING_ID_MASK;
190 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000196static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198{
199 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000202
203 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205}
206
/* Ring the EQ doorbell for @qid: acknowledge @num_popped consumed event
 * entries and optionally re-arm the EQ and/or clear the interrupt.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high bits of the ring id go into a separate doorbell field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the HW after an EEH (PCI) error was detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Ring the CQ doorbell for @qid: acknowledge @num_popped consumed
 * completion entries and optionally re-arm the CQ.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high bits of the ring id go into a separate doorbell field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the HW after an EEH (PCI) error was detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* ndo_set_mac_address handler: program the new MAC into the FW, delete
 * the previously programmed one, and confirm the change by reading the
 * active MAC back from the FW before committing it to the netdev.
 * Returns 0 on success or a negative errno (-EADDRNOTAVAIL, -EPERM, or
 * a FW-command error).
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy BE2 (v0 cmd layout) HW stats from the stats-cmd response into
 * the driver's cumulative counters in adapter->drv_stats.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response buffer is little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 keeps address- and vlan-filter drops separately; fold both */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters live per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy BE3 (v1 cmd layout) HW stats from the stats-cmd response into
 * the driver's cumulative counters in adapter->drv_stats.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response buffer is little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy Skyhawk-era (v2 cmd layout) HW stats from the stats-cmd response
 * into the driver's cumulative counters, including RoCE counters when
 * the function supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response buffer is little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters exist only in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
Selvin Xavier005d5692011-05-16 07:36:35 +0000489static void populate_lancer_stats(struct be_adapter *adapter)
490{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000491
Selvin Xavier005d5692011-05-16 07:36:35 +0000492 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000493 struct lancer_pport_stats *pport_stats =
494 pport_stats_from_cmd(adapter);
495
496 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
497 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
498 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
499 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000500 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000501 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000502 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
503 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
504 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
505 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
506 drvs->rx_dropped_tcp_length =
507 pport_stats->rx_dropped_invalid_tcp_length;
508 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
509 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
510 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
511 drvs->rx_dropped_header_too_small =
512 pport_stats->rx_dropped_header_too_small;
513 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000514 drvs->rx_address_filtered =
515 pport_stats->rx_address_filtered +
516 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000517 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000518 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000519 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
520 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000521 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->forwarded_packets = pport_stats->num_forwards_lo;
523 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000526}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000527
Sathya Perla09c1c682011-08-22 19:41:53 +0000528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
Jingoo Han4188e7d2013-08-05 18:02:02 +0900540static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
/* Decode the stats-cmd response (layout depends on the chip family) into
 * adapter->drv_stats and the per-RXQ drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
579
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via the u64_stats seqcount retry loops) and derive
 * the standard rtnl error counters from the driver stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent
		 * writer update (64-bit stats on 32-bit hosts)
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648 struct net_device *netdev = adapter->netdev;
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000651 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659}
660
Sathya Perla3c8def92011-06-12 20:01:58 +0000661static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663{
Sathya Perla3c8def92011-06-12 20:01:58 +0000664 struct be_tx_stats *stats = tx_stats(txo);
665
Sathya Perlaab1594e2011-07-25 19:10:15 +0000666 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 /* to account for hdr wrb */
685 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000702 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703}
704
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
/* Fill the header WRB that precedes the fragment WRBs of a TX request.
 * Encodes the requested offloads (LSO / L4 csum), VLAN tag insertion,
 * the WRB count and total length via the AMAP bit-field accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	/* always request Ethernet CRC generation */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: pass the MSS; lso6 flags an IPv6 TSO pkt
		 * (Lancer does not need the lso6 hint)
		 */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request L4 checksum offload for TCP/UDP pkts */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		/* ask HW to insert the (priority-adjusted) VLAN tag */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
755
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000764 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000765 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000770 }
771}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772
/* DMA-map the skb and write one WRB per fragment into the TX queue.
 * Layout: [hdr WRB][linear frag][page frags...][optional dummy WRB].
 * Returns the number of data bytes mapped, or 0 on a DMA mapping
 * failure (all mappings made so far are then undone and the queue
 * head is rewound).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;	/* true once the linear part is mapped */
	u16 map_head;

	/* reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	if (skb->len > skb->data_len) {
		/* map the linear portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					 skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB to make the WRB count even (BE2/3) */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap every fragment mapped so far, rewind the head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB is a single map */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
839
/* Insert the VLAN tag (and the QnQ outer tag, if configured) into the
 * pkt data itself instead of letting HW do it; used to work around HW
 * csum/lockup issues with HW VLAN tagging.  May set *skip_hw_vlan to
 * tell the caller to disable HW tagging for this pkt.
 * Returns the (possibly reallocated) skb, or NULL on alloc failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now inlined in the pkt; clear the skb metadata */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
882
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
Sathya Perlaee9c7992013-05-22 23:04:55 +0000910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000914}
915
/* Apply chip-specific TX workarounds to the skb before it is handed to
 * HW.  May pad, trim or manually VLAN-tag the pkt, and may set
 * *skip_hw_vlan to disable HW tagging.  Returns the (possibly
 * reallocated) skb, or NULL if the pkt was dropped (already freed).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		/* trim the pad so that skb len matches the IP tot_len */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
992
/* ndo_start_xmit handler: apply TX workarounds, map the skb into WRBs
 * and ring the doorbell.  The queue is stopped *before* notifying HW
 * when it cannot hold another max-sized request, so that the TX compl
 * path can wake it without racing (see comment below).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* pkt was dropped (and freed) by the workaround code */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the pkt */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1041
1042static int be_change_mtu(struct net_device *netdev, int new_mtu)
1043{
1044 struct be_adapter *adapter = netdev_priv(netdev);
1045 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001046 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1047 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048 dev_info(&adapter->pdev->dev,
1049 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001050 BE_MIN_MTU,
1051 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 return -EINVAL;
1053 }
1054 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1055 netdev->mtu, new_mtu);
1056 netdev->mtu = new_mtu;
1057 return 0;
1058}
1059
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * When HW filtering succeeds again, vlan promiscuous mode is disabled.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more vids than HW filter slots: use vlan promiscuous mode */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* nothing to do if already in vlan promiscuous mode */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1119
Patrick McHardy80d5c362013-04-19 02:04:28 +00001120static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001121{
1122 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001123 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001124
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001125 /* Packets with VID 0 are always received by Lancer by default */
1126 if (lancer_chip(adapter) && vid == 0)
1127 goto ret;
1128
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301130 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001131
Somnath Kotura6b74e02014-01-21 15:50:55 +05301132 status = be_vid_config(adapter);
1133 if (status) {
1134 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001135 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301136 }
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001137ret:
1138 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001139}
1140
Patrick McHardy80d5c362013-04-19 02:04:28 +00001141static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142{
1143 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001144 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001146 /* Packets with VID 0 are always received by Lancer by default */
1147 if (lancer_chip(adapter) && vid == 0)
1148 goto ret;
1149
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001150 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301151 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001152 if (!status)
1153 adapter->vlans_added--;
1154 else
1155 adapter->vlan_tag[vid] = 1;
1156ret:
1157 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001158}
1159
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * filtering to match the netdev's flags and address lists.  Falls back
 * to (mcast) promiscuous mode when HW filter slots are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the vlan filters skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all previously-programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more unicast MACs than HW slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1221
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001222static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1223{
1224 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001225 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001226 int status;
1227
Sathya Perla11ac75e2011-12-13 00:58:50 +00001228 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001229 return -EPERM;
1230
Sathya Perla11ac75e2011-12-13 00:58:50 +00001231 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001232 return -EINVAL;
1233
Sathya Perla3175d8c2013-07-23 15:25:03 +05301234 if (BEx_chip(adapter)) {
1235 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1236 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001237
Sathya Perla11ac75e2011-12-13 00:58:50 +00001238 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1239 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301240 } else {
1241 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1242 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001243 }
1244
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001245 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001246 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1247 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001248 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001249 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001250
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001251 return status;
1252}
1253
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001254static int be_get_vf_config(struct net_device *netdev, int vf,
1255 struct ifla_vf_info *vi)
1256{
1257 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001258 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001259
Sathya Perla11ac75e2011-12-13 00:58:50 +00001260 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001261 return -EPERM;
1262
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001264 return -EINVAL;
1265
1266 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001268 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1269 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001271
1272 return 0;
1273}
1274
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for @vf.
 * A vlan/qos pair of 0/0 resets (disables) transparent tagging.
 * The cached vlan_tag is updated only if the f/w call succeeds.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* skip the f/w call if this tag is already programmed */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1306
Ajit Khapardee1d18732010-07-23 01:52:13 +00001307static int be_set_vf_tx_rate(struct net_device *netdev,
1308 int vf, int rate)
1309{
1310 struct be_adapter *adapter = netdev_priv(netdev);
1311 int status = 0;
1312
Sathya Perla11ac75e2011-12-13 00:58:50 +00001313 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001314 return -EPERM;
1315
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001316 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001317 return -EINVAL;
1318
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001319 if (rate < 100 || rate > 10000) {
1320 dev_err(&adapter->pdev->dev,
1321 "tx rate must be between 100 and 10000 Mbps\n");
1322 return -EINVAL;
1323 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001324
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001325 if (lancer_chip(adapter))
1326 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1327 else
1328 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001329
1330 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001331 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001332 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001333 else
1334 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001335 return status;
1336}
1337
Sathya Perla2632baf2013-10-01 16:00:00 +05301338static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1339 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001340{
Sathya Perla2632baf2013-10-01 16:00:00 +05301341 aic->rx_pkts_prev = rx_pkts;
1342 aic->tx_reqs_prev = tx_pkts;
1343 aic->jiffies = now;
1344}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001345
Sathya Perla2632baf2013-10-01 16:00:00 +05301346static void be_eqd_update(struct be_adapter *adapter)
1347{
1348 struct be_set_eqd set_eqd[MAX_EVT_QS];
1349 int eqd, i, num = 0, start;
1350 struct be_aic_obj *aic;
1351 struct be_eq_obj *eqo;
1352 struct be_rx_obj *rxo;
1353 struct be_tx_obj *txo;
1354 u64 rx_pkts, tx_pkts;
1355 ulong now;
1356 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001357
Sathya Perla2632baf2013-10-01 16:00:00 +05301358 for_all_evt_queues(adapter, eqo, i) {
1359 aic = &adapter->aic_obj[eqo->idx];
1360 if (!aic->enable) {
1361 if (aic->jiffies)
1362 aic->jiffies = 0;
1363 eqd = aic->et_eqd;
1364 goto modify_eqd;
1365 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366
Sathya Perla2632baf2013-10-01 16:00:00 +05301367 rxo = &adapter->rx_obj[eqo->idx];
1368 do {
1369 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1370 rx_pkts = rxo->stats.rx_pkts;
1371 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001372
Sathya Perla2632baf2013-10-01 16:00:00 +05301373 txo = &adapter->tx_obj[eqo->idx];
1374 do {
1375 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1376 tx_pkts = txo->stats.tx_reqs;
1377 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001378
Sathya Perla4097f662009-03-24 16:40:13 -07001379
Sathya Perla2632baf2013-10-01 16:00:00 +05301380 /* Skip, if wrapped around or first calculation */
1381 now = jiffies;
1382 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1383 rx_pkts < aic->rx_pkts_prev ||
1384 tx_pkts < aic->tx_reqs_prev) {
1385 be_aic_update(aic, rx_pkts, tx_pkts, now);
1386 continue;
1387 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001388
Sathya Perla2632baf2013-10-01 16:00:00 +05301389 delta = jiffies_to_msecs(now - aic->jiffies);
1390 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1391 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1392 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001393
Sathya Perla2632baf2013-10-01 16:00:00 +05301394 if (eqd < 8)
1395 eqd = 0;
1396 eqd = min_t(u32, eqd, aic->max_eqd);
1397 eqd = max_t(u32, eqd, aic->min_eqd);
1398
1399 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001400modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301401 if (eqd != aic->prev_eqd) {
1402 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1403 set_eqd[num].eq_id = eqo->q.id;
1404 aic->prev_eqd = eqd;
1405 num++;
1406 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001407 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301408
1409 if (num)
1410 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001411}
1412
Sathya Perla3abcded2010-10-03 22:12:27 -07001413static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001414 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001415{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001416 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001417
Sathya Perlaab1594e2011-07-25 19:10:15 +00001418 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001419 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001420 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001421 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001422 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001423 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001424 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001425 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001426 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427}
1428
Sathya Perla2e588f82011-03-11 02:49:26 +00001429static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001430{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001431 /* L4 checksum is not reliable for non TCP/UDP packets.
1432 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001433 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1434 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001435}
1436
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301437static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001438{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001439 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001441 struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301442 u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443
Sathya Perla3abcded2010-10-03 22:12:27 -07001444 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445 BUG_ON(!rx_page_info->page);
1446
Ajit Khaparde205859a2010-02-09 01:34:21 +00001447 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001448 dma_unmap_page(&adapter->pdev->dev,
1449 dma_unmap_addr(rx_page_info, bus),
1450 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001451 rx_page_info->last_page_user = false;
1452 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301454 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455 atomic_dec(&rxq->used);
1456 return rx_page_info;
1457}
1458
1459/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001460static void be_rx_compl_discard(struct be_rx_obj *rxo,
1461 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001463 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001464 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001465
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001466 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301467 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001468 put_page(page_info->page);
1469 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470 }
1471}
1472
1473/*
1474 * skb_fill_rx_data forms a complete skb for an ether frame
1475 * indicated by rxcp.
1476 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001477static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1478 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001480 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001481 u16 i, j;
1482 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483 u8 *start;
1484
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301485 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 start = page_address(page_info->page) + page_info->page_offset;
1487 prefetch(start);
1488
1489 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001490 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001491
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492 skb->len = curr_frag_len;
1493 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001494 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495 /* Complete packet has now been moved to data */
1496 put_page(page_info->page);
1497 skb->data_len = 0;
1498 skb->tail += curr_frag_len;
1499 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001500 hdr_len = ETH_HLEN;
1501 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001502 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001503 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504 skb_shinfo(skb)->frags[0].page_offset =
1505 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001506 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001508 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 skb->tail += hdr_len;
1510 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001511 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512
Sathya Perla2e588f82011-03-11 02:49:26 +00001513 if (rxcp->pkt_size <= rx_frag_size) {
1514 BUG_ON(rxcp->num_rcvd != 1);
1515 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 }
1517
1518 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001519 remaining = rxcp->pkt_size - curr_frag_len;
1520 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301521 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001522 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001524 /* Coalesce all frags from the same physical page in one slot */
1525 if (page_info->page_offset == 0) {
1526 /* Fresh page */
1527 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001528 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001529 skb_shinfo(skb)->frags[j].page_offset =
1530 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001531 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001532 skb_shinfo(skb)->nr_frags++;
1533 } else {
1534 put_page(page_info->page);
1535 }
1536
Eric Dumazet9e903e02011-10-18 21:00:24 +00001537 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538 skb->len += curr_frag_len;
1539 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001540 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001541 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001542 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001544 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545}
1546
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001547/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301548static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001549 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001551 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001552 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001554
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001555 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001556 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001557 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001558 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001559 return;
1560 }
1561
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001562 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001564 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001565 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001566 else
1567 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001569 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001570 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001571 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001572 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301573 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574
Jiri Pirko343e43c2011-08-25 02:50:51 +00001575 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001576 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001577
1578 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579}
1580
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001581/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001582static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1583 struct napi_struct *napi,
1584 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001586 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001588 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00001589 u16 remaining, curr_frag_len;
1590 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001591
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001592 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001593 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001594 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001595 return;
1596 }
1597
Sathya Perla2e588f82011-03-11 02:49:26 +00001598 remaining = rxcp->pkt_size;
1599 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301600 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001601
1602 curr_frag_len = min(remaining, rx_frag_size);
1603
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001604 /* Coalesce all frags from the same physical page in one slot */
1605 if (i == 0 || page_info->page_offset == 0) {
1606 /* First frag or Fresh page */
1607 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001608 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001609 skb_shinfo(skb)->frags[j].page_offset =
1610 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001611 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001612 } else {
1613 put_page(page_info->page);
1614 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001615 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001616 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001617 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001618 memset(page_info, 0, sizeof(*page_info));
1619 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001620 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001621
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001622 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001623 skb->len = rxcp->pkt_size;
1624 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001625 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001626 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001627 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001628 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301629 skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001630
Jiri Pirko343e43c2011-08-25 02:50:51 +00001631 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001632 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001633
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001634 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001635}
1636
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001637static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1638 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001639{
Sathya Perla2e588f82011-03-11 02:49:26 +00001640 rxcp->pkt_size =
1641 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1642 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1643 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1644 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001645 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001646 rxcp->ip_csum =
1647 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1648 rxcp->l4_csum =
1649 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1650 rxcp->ipv6 =
1651 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001652 rxcp->num_rcvd =
1653 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1654 rxcp->pkt_type =
1655 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001656 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001657 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001658 if (rxcp->vlanf) {
1659 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001660 compl);
1661 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1662 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001663 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001664 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001665}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001667static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1668 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001669{
1670 rxcp->pkt_size =
1671 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1672 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1673 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1674 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001675 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001676 rxcp->ip_csum =
1677 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1678 rxcp->l4_csum =
1679 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1680 rxcp->ipv6 =
1681 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001682 rxcp->num_rcvd =
1683 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1684 rxcp->pkt_type =
1685 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001686 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001687 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001688 if (rxcp->vlanf) {
1689 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001690 compl);
1691 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1692 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001693 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001694 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001695 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1696 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001697}
1698
1699static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1700{
1701 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1702 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1703 struct be_adapter *adapter = rxo->adapter;
1704
1705 /* For checking the valid bit it is Ok to use either definition as the
1706 * valid bit is at the same position in both v0 and v1 Rx compl */
1707 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708 return NULL;
1709
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001710 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001711 be_dws_le_to_cpu(compl, sizeof(*compl));
1712
1713 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001714 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001715 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001716 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001717
Somnath Koture38b1702013-05-29 22:55:56 +00001718 if (rxcp->ip_frag)
1719 rxcp->l4_csum = 0;
1720
Sathya Perla15d72182011-03-21 20:49:26 +00001721 if (rxcp->vlanf) {
1722 /* vlanf could be wrongly set in some cards.
1723 * ignore if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001724 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001725 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001726
Sathya Perla15d72182011-03-21 20:49:26 +00001727 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001728 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001729
Somnath Kotur939cf302011-08-18 21:51:49 -07001730 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001731 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001732 rxcp->vlanf = 0;
1733 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001734
1735 /* As the compl has been parsed, reset it; we wont touch it again */
1736 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001737
Sathya Perla3abcded2010-10-03 22:12:27 -07001738 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001739 return rxcp;
1740}
1741
Eric Dumazet1829b082011-03-01 05:48:12 +00001742static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001744 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001745
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001746 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001747 gfp |= __GFP_COMP;
1748 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001749}
1750
1751/*
1752 * Allocate a page, split it to fragments of size rx_frag_size and post as
1753 * receive buffers to BE
1754 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001755static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001756{
Sathya Perla3abcded2010-10-03 22:12:27 -07001757 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001758 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001759 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001760 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01001761 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762 struct be_eth_rx_d *rxd;
1763 u64 page_dmaaddr = 0, frag_dmaaddr;
1764 u32 posted, page_offset = 0;
1765
Sathya Perla3abcded2010-10-03 22:12:27 -07001766 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1768 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001769 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001770 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001771 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001772 break;
1773 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01001774 page_dmaaddr = dma_map_page(dev, pagep, 0,
1775 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001776 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01001777 if (dma_mapping_error(dev, page_dmaaddr)) {
1778 put_page(pagep);
1779 pagep = NULL;
1780 rx_stats(rxo)->rx_post_fail++;
1781 break;
1782 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783 page_info->page_offset = 0;
1784 } else {
1785 get_page(pagep);
1786 page_info->page_offset = page_offset + rx_frag_size;
1787 }
1788 page_offset = page_info->page_offset;
1789 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001790 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001791 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1792
1793 rxd = queue_head_node(rxq);
1794 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1795 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001796
1797 /* Any space left in the current big page for another frag? */
1798 if ((page_offset + rx_frag_size + rx_frag_size) >
1799 adapter->big_page_size) {
1800 pagep = NULL;
1801 page_info->last_page_user = true;
1802 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001803
1804 prev_page_info = page_info;
1805 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001806 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001807 }
1808 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001809 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001810
1811 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301813 if (rxo->rx_post_starved)
1814 rxo->rx_post_starved = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001815 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001816 } else if (atomic_read(&rxq->used) == 0) {
1817 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001818 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820}
1821
Sathya Perla5fb379e2009-06-18 00:02:59 +00001822static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001824 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1825
1826 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1827 return NULL;
1828
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001829 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001830 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1831
1832 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1833
1834 queue_tail_inc(tx_cq);
1835 return txcp;
1836}
1837
/* Unmap and free the skb whose wrbs end at @last_index on txo's TX queue.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit them back against txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb also covers the linear skb header;
		 * unmap that mapping only once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			(unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1869
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001870/* Return the number of events in the event queue */
1871static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001872{
1873 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001874 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001875
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001876 do {
1877 eqe = queue_tail_node(&eqo->q);
1878 if (eqe->evt == 0)
1879 break;
1880
1881 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001882 eqe->evt = 0;
1883 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001884 queue_tail_inc(&eqo->q);
1885 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001886
1887 return num;
1888}
1889
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001890/* Leaves the EQ is disarmed state */
1891static void be_eq_clean(struct be_eq_obj *eqo)
1892{
1893 int num = events_get(eqo);
1894
1895 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1896}
1897
/* Drain the RX completion queue and free all posted-but-unused RX buffers.
 * Non-Lancer chips post a flush completion (num_rcvd == 0) to mark the end
 * of the drain; we wait a bounded time for it. Lancer posts no flush compl,
 * so a NULL completion ends the loop immediately there.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms, or if HW is in error state */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					"did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1946
/* Reap all outstanding TX completions across every TX queue.
 * Polls for up to ~200ms; any wrbs still posted after that (completions
 * that will never arrive, e.g. after an HW error) are unmapped and freed
 * directly from the TX queue.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
						end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			/* Queue is fully drained once used reaches zero */
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of the skb at the tail and
			 * free it as if its completion had arrived.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
					&dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2005
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002006static void be_evt_queues_destroy(struct be_adapter *adapter)
2007{
2008 struct be_eq_obj *eqo;
2009 int i;
2010
2011 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002012 if (eqo->q.created) {
2013 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002014 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302015 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302016 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002017 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002018 be_queue_free(adapter, &eqo->q);
2019 }
2020}
2021
/* Allocate and create one event queue per vector, together with its NAPI
 * context and adaptive interrupt-coalescing (AIC) state.
 * Returns 0 or a negative status; on failure, partially created queues are
 * left for the caller to destroy.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* Cap EQ count at both the IRQs available and the configured count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
				BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2055
Sathya Perla5fb379e2009-06-18 00:02:59 +00002056static void be_mcc_queues_destroy(struct be_adapter *adapter)
2057{
2058 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002059
Sathya Perla8788fdc2009-07-27 22:52:03 +00002060 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002061 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002062 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002063 be_queue_free(adapter, q);
2064
Sathya Perla8788fdc2009-07-27 22:52:03 +00002065 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002066 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002067 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002068 be_queue_free(adapter, q);
2069}
2070
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and then the MCC queue itself, unwinding
 * in reverse order on failure. Returns 0 on success, -1 on failure (callers
 * only test for non-zero).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2103
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002104static void be_tx_queues_destroy(struct be_adapter *adapter)
2105{
2106 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002107 struct be_tx_obj *txo;
2108 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002109
Sathya Perla3c8def92011-06-12 20:01:58 +00002110 for_all_tx_queues(adapter, txo, i) {
2111 q = &txo->q;
2112 if (q->created)
2113 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2114 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002115
Sathya Perla3c8def92011-06-12 20:01:58 +00002116 q = &txo->cq;
2117 if (q->created)
2118 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2119 be_queue_free(adapter, q);
2120 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002121}
2122
/* Allocate and create the TX queues and their completion queues.
 * When there are fewer EQs than TX queues, TX queues share EQs round-robin
 * by index. Returns 0 or a negative status; partial creations are left for
 * the caller to clean up.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
				sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
				sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		adapter->num_tx_qs);
	return 0;
}
2163
2164static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002165{
2166 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002167 struct be_rx_obj *rxo;
2168 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169
Sathya Perla3abcded2010-10-03 22:12:27 -07002170 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002171 q = &rxo->cq;
2172 if (q->created)
2173 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2174 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176}
2177
/* Create one RX completion queue per EQ (the RSS rings), plus a default
 * RXQ for non-IP traffic when RSS is in use (at least 2 rings).
 * Returns 0 or a negative status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Distribute the RX CQs across the EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
2214
/* INTx interrupt handler. Schedules NAPI if it is not already running and
 * notifies the EQ with the number of events consumed; tracks spurious
 * interrupts so persistent ones are reported as IRQ_NONE.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2246
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002247static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002248{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002249 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250
Sathya Perla0b545a62012-11-23 00:27:18 +00002251 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2252 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002253 return IRQ_HANDLED;
2254}
2255
Sathya Perla2e588f82011-03-11 02:49:26 +00002256static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257{
Somnath Koture38b1702013-05-29 22:55:56 +00002258 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002259}
2260
/* Process up to @budget RX completions from one RX object.
 * @polling distinguishes NAPI polling from busy-polling; GRO is skipped
 * while busy-polling. Replenishes the RX ring when it runs low (unless it
 * is post-starved, which be_worker handles). Returns completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
			!rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2316
/* Reap up to @budget TX completions from @txo and free the posted wrbs.
 * Wakes netdev subqueue @idx if it was stopped and the TX queue is now at
 * most half full. Returns true when fewer than @budget compls were found,
 * i.e. this queue is done for the current poll.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002349
/* NAPI poll handler: services every TXQ and RXQ mapped to this EQ, plus MCC
 * completions when this is the MCC EQ. The EQ is re-armed only if all work
 * fit within @budget; otherwise events are counted/cleared and polling
 * continues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
				eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll holds the RX rings right now; stay in polling */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2394
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll handler: drain a small amount of RX work without interrupts.
 * Returns LL_FLUSH_BUSY if NAPI currently owns the rings.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work_done = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work_done = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work_done)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work_done;
}
#endif
2416
/* Check the adapter for unrecoverable hardware errors and log them.
 * Lancer chips: read the SLIPORT status/error registers.
 * BE chips: read the PCI-config UE (unrecoverable error) status registers
 * and mask off bits hidden by the corresponding mask registers.
 * adapter->hw_error is set only for SLIPORT errors; BE UEs are logged but
 * not treated as fatal since some platforms report spurious UEs.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Nothing more to do if an error was already detected */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Keep only the unmasked UE bits */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if its a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2492
Sathya Perla8d56ff12009-11-22 22:02:26 +00002493static void be_msix_disable(struct be_adapter *adapter)
2494{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002495 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002496 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002497 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302498 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002499 }
2500}
2501
/* Enable MSI-x.
 * Requests the full vector count first; if pci_enable_msix() reports that
 * only fewer vectors are available (a positive return), retries with that
 * count provided it meets MIN_MSIX_VECTORS. When RoCE is supported, half of
 * the enabled vectors are reserved for RoCE. Returns 0 on success or on a
 * PF falling back to INTx; returns the error for VFs (INTx not supported).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the PCI core said are
		 * actually available
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		adapter->num_msix_vec);
	return 0;
}
2550
/* Map an event-queue object to the Linux IRQ number of its MSI-X entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2556
2557static int be_msix_register(struct be_adapter *adapter)
2558{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002559 struct net_device *netdev = adapter->netdev;
2560 struct be_eq_obj *eqo;
2561 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002562
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002563 for_all_evt_queues(adapter, eqo, i) {
2564 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2565 vec = be_msix_vec_get(adapter, eqo);
2566 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002567 if (status)
2568 goto err_msix;
2569 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002570
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002571 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002572err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002573 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2574 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2575 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2576 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002577 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002578 return status;
2579}
2580
2581static int be_irq_register(struct be_adapter *adapter)
2582{
2583 struct net_device *netdev = adapter->netdev;
2584 int status;
2585
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002586 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002587 status = be_msix_register(adapter);
2588 if (status == 0)
2589 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002590 /* INTx is not supported for VF */
2591 if (!be_physfn(adapter))
2592 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593 }
2594
Sathya Perlae49cc342012-11-27 19:50:02 +00002595 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002596 netdev->irq = adapter->pdev->irq;
2597 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002598 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002599 if (status) {
2600 dev_err(&adapter->pdev->dev,
2601 "INTx request IRQ failed - err %d\n", status);
2602 return status;
2603 }
2604done:
2605 adapter->isr_registered = true;
2606 return 0;
2607}
2608
2609static void be_irq_unregister(struct be_adapter *adapter)
2610{
2611 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002612 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002613 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002614
2615 if (!adapter->isr_registered)
2616 return;
2617
2618 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002619 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002620 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002621 goto done;
2622 }
2623
2624 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002625 for_all_evt_queues(adapter, eqo, i)
2626 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002627
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002628done:
2629 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002630}
2631
/* Tear down all RX queues: for each RXQ that was created in FW, issue the
 * destroy command and drain its completion queue, then free the host-side
 * ring memory unconditionally.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* flush completions for frags already posted */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2647
/* Stop the data path: quiesce NAPI/busy-poll, drain pending TX completions,
 * destroy RX queues, drop the extra uc-MAC filters and release IRQs.
 * The statement order is deliberate and roughly mirrors be_open() in
 * reverse.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* index 0 holds the primary MAC and stays programmed; delete only
	 * the additional uc-MAC filters (indices 1..uc_macs)
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure no interrupt handler is still running for this EQ
		 * before flushing its pending events
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2691
/* Allocate and create all RX queues and program the 128-entry RSS
 * indirection table. The default (non-RSS) RXQ must be created before the
 * RSS RXQs - a FW requirement. Finally, post the initial RX buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the table by striping the rss-queue ids across all
		 * 128 slots
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is only supported on Lancer/Skyhawk, not BE2/BE3 */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		adapter->rss_flags = RSS_ENABLE_NONE;
	}

	/* NOTE(review): in the RSS-disabled path rsstable is passed down
	 * uninitialized - presumably FW ignores it when rss_flags is
	 * RSS_ENABLE_NONE; confirm.
	 */
	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
			       128);
	if (rc) {
		adapter->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2751
/* Bring up the data path: create RX queues, register IRQs, arm all CQs,
 * enable the async MCC path, enable NAPI/busy-poll and arm the EQs, then
 * report link state and start the TX queues. Any failure rolls back via
 * be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	/* be_close() safely unwinds whatever was brought up above;
	 * note the specific error is dropped and -EIO is returned
	 */
	be_close(adapter->netdev);
	return -EIO;
}
2795
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002796static int be_setup_wol(struct be_adapter *adapter, bool enable)
2797{
2798 struct be_dma_mem cmd;
2799 int status = 0;
2800 u8 mac[ETH_ALEN];
2801
2802 memset(mac, 0, ETH_ALEN);
2803
2804 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002805 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2806 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002807 if (cmd.va == NULL)
2808 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002809
2810 if (enable) {
2811 status = pci_write_config_dword(adapter->pdev,
2812 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2813 if (status) {
2814 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002815 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002816 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2817 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002818 return status;
2819 }
2820 status = be_cmd_enable_magic_wol(adapter,
2821 adapter->netdev->dev_addr, &cmd);
2822 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2823 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2824 } else {
2825 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2826 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2827 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2828 }
2829
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002830 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002831 return status;
2832}
2833
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from the
 * seed. These addresses are programmed into the ASIC by the PF, and each
 * VF driver queries its MAC address during probe.
 */
/* Program a generated MAC address for each VF. On BE2/BE3 the MAC is added
 * as a pmac filter on the VF's interface; on newer chips the FW set_mac
 * command is used instead. Consecutive VFs get consecutive addresses.
 * NOTE(review): the loop continues past failures and only the status of the
 * last VF is returned; earlier errors are merely logged.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next address in sequence */
		mac[5] += 1;
	}
	return status;
}
2868
Sathya Perla4c876612013-02-03 20:30:11 +00002869static int be_vfs_mac_query(struct be_adapter *adapter)
2870{
2871 int status, vf;
2872 u8 mac[ETH_ALEN];
2873 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002874
2875 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302876 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2877 mac, vf_cfg->if_handle,
2878 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002879 if (status)
2880 return status;
2881 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2882 }
2883 return 0;
2884}
2885
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002886static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002887{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002888 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002889 u32 vf;
2890
Sathya Perla257a3fe2013-06-14 15:54:51 +05302891 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002892 dev_warn(&adapter->pdev->dev,
2893 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002894 goto done;
2895 }
2896
Sathya Perlab4c1df92013-05-08 02:05:47 +00002897 pci_disable_sriov(adapter->pdev);
2898
Sathya Perla11ac75e2011-12-13 00:58:50 +00002899 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302900 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002901 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2902 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302903 else
2904 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2905 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002906
Sathya Perla11ac75e2011-12-13 00:58:50 +00002907 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2908 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002909done:
2910 kfree(adapter->vf_cfg);
2911 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002912}
2913
/* Destroy MCC, RX-CQ, TX and event queues - the reverse of the creation
 * order used in be_setup_queues().
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2921
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302922static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002923{
Sathya Perla191eb752012-02-23 18:50:13 +00002924 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2925 cancel_delayed_work_sync(&adapter->work);
2926 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2927 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302928}
2929
Somnath Koturb05004a2013-12-05 12:08:16 +05302930static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302931{
2932 int i;
2933
Somnath Koturb05004a2013-12-05 12:08:16 +05302934 if (adapter->pmac_id) {
2935 for (i = 0; i < (adapter->uc_macs + 1); i++)
2936 be_cmd_pmac_del(adapter, adapter->if_handle,
2937 adapter->pmac_id[i], 0);
2938 adapter->uc_macs = 0;
2939
2940 kfree(adapter->pmac_id);
2941 adapter->pmac_id = NULL;
2942 }
2943}
2944
/* Undo be_setup(): stop the worker, tear down VFs, delete all programmed
 * MACs, destroy the interface and all queues, then release MSI-X vectors.
 * The order matters - each step depends on the resources the next one
 * releases.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2962
Sathya Perla4c876612013-02-03 20:30:11 +00002963static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002964{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302965 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002966 struct be_vf_cfg *vf_cfg;
2967 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002968 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002969
Sathya Perla4c876612013-02-03 20:30:11 +00002970 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2971 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002972
Sathya Perla4c876612013-02-03 20:30:11 +00002973 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302974 if (!BE3_chip(adapter)) {
2975 status = be_cmd_get_profile_config(adapter, &res,
2976 vf + 1);
2977 if (!status)
2978 cap_flags = res.if_cap_flags;
2979 }
Sathya Perla4c876612013-02-03 20:30:11 +00002980
2981 /* If a FW profile exists, then cap_flags are updated */
2982 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2983 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2984 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2985 &vf_cfg->if_handle, vf + 1);
2986 if (status)
2987 goto err;
2988 }
2989err:
2990 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002991}
2992
/* Allocate the per-VF config array and initialize each VF's if_handle and
 * pmac_id to -1 (not yet created).
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3009
/* Provision SR-IOV VFs: create (or re-discover) per-VF interfaces, assign
 * (or re-query) MAC addresses, grant filter-management privilege where
 * possible, and finally enable SR-IOV on the PCI device. When VFs are
 * already enabled (e.g. the PF was re-probed), the existing state is
 * queried instead of re-created.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled: keep the existing count and ignore
		 * the module parameter
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* interfaces already exist in FW; just fetch their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		/* MACs already programmed; read them back */
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3101
/* On BE2/BE3 FW does not suggest the supported limits, so fill 'res' with
 * driver-side heuristics based on chip type, SR-IOV intent, function mode
 * and port count.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	/* PF gets the larger uc-MAC filter budget */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* VLAN budget depends on the multi-channel mode of the function */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for a non-SR-IOV PF with the RSS capability;
	 * otherwise max_rss_qs keeps its prior value
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	/* when SR-IOV is possible, the PF gets fewer EQs */
	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3153
Sathya Perla30128032011-11-10 19:17:57 +00003154static void be_setup_init(struct be_adapter *adapter)
3155{
3156 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003157 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003158 adapter->if_handle = -1;
3159 adapter->be3_native = false;
3160 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003161 if (be_physfn(adapter))
3162 adapter->cmd_privileges = MAX_PRIVILEGES;
3163 else
3164 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003165}
3166
/* Populate adapter->res with this function's resource limits.
 * BE2/BE3: limits are driver heuristics (BEx_get_resources).
 * Lancer/Skyhawk: limits come from FW.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3210
/* Query FW for the function's mode/capabilities and per-function resource
 * limits, allocate the pmac-id table sized for the uc-MAC budget, and clamp
 * the configured queue count to what HW supports.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* profile query failures are non-fatal; only log on success */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3246
Sathya Perla95046b92013-07-23 15:25:02 +05303247static int be_mac_setup(struct be_adapter *adapter)
3248{
3249 u8 mac[ETH_ALEN];
3250 int status;
3251
3252 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3253 status = be_cmd_get_perm_mac(adapter, mac);
3254 if (status)
3255 return status;
3256
3257 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3258 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3259 } else {
3260 /* Maybe the HW was reset; dev_addr must be re-programmed */
3261 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3262 }
3263
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003264 /* For BE3-R VFs, the PF programs the initial MAC address */
3265 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3266 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3267 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303268 return 0;
3269}
3270
/* Arm the periodic (1s) housekeeping worker and record the fact in
 * adapter->flags so a later be_cancel_worker() knows it must cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3276
/* Create all adapter queues (EQs, TX queues, RX completion queues, MCC
 * queues) in dependency order, then publish the real RX/TX queue counts
 * to the net stack.  Caller must hold rtnl_lock() for the
 * netif_set_real_num_*_queues() calls (see be_setup()).
 *
 * On any failure, logs once and returns the error; cleanup of whatever
 * was created is left to the caller's error path (be_clear()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	/* EQs first: the other queue types attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3311
/* Tear down and re-create all queues, e.g. after a channel/queue-count
 * reconfiguration.  Quiesces the interface (be_close) and the worker
 * first, rebuilds MSI-X and the queues, then restores both.
 *
 * MSI-X is only re-programmed when no vectors are shared with RoCE;
 * shared vectors make the MSI-X table off-limits to this driver.
 *
 * Returns 0 on success; on failure the interface is left down.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* Bring the interface back up only if it was up on entry */
	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3347
/* Full adapter bring-up: query config, enable MSI-X, create the FW
 * interface object and all queues, program MAC/VLAN/RX-mode/flow-control,
 * optionally set up SR-IOV VFs, and start the periodic worker.
 *
 * The step order mirrors FW dependencies (config before MSI-X before
 * if_create before queues).  Any failure unwinds everything via
 * be_clear() and returns the error status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the interface flags the HW actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Warn on pre-4.0 firmware on BE2: interrupts may misbehave */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-assert desired flow control only if HW state differs */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3428
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: interrupts are unavailable, so manually ring the
 * doorbell of every event queue (re-arm + event trigger) and schedule its
 * NAPI context to drain pending completions.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant trailing "return;" removed */
}
#endif
3444
/* Signature at the head of a UFI firmware file, and the 32-byte cookie
 * (two 16-byte halves, compared as one blob) that marks the start of the
 * flash section directory inside the image — see get_fsec_info().
 */
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003447
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003448static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003449 const u8 *p, u32 img_start, int image_size,
3450 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003451{
3452 u32 crc_offset;
3453 u8 flashed_crc[4];
3454 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003455
3456 crc_offset = hdr_size + img_start + image_size - 4;
3457
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003458 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003459
3460 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003461 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003462 if (status) {
3463 dev_err(&adapter->pdev->dev,
3464 "could not get crc from flash, not flashing redboot\n");
3465 return false;
3466 }
3467
3468 /*update redboot only if crc does not match*/
3469 if (!memcmp(flashed_crc, p, 4))
3470 return false;
3471 else
3472 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003473}
3474
Sathya Perla306f1342011-08-02 19:57:45 +00003475static bool phy_flashing_required(struct be_adapter *adapter)
3476{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003477 return (adapter->phy.phy_type == TN_8022 &&
3478 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003479}
3480
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003481static bool is_comp_in_ufi(struct be_adapter *adapter,
3482 struct flash_section_info *fsec, int type)
3483{
3484 int i = 0, img_type = 0;
3485 struct flash_section_info_g2 *fsec_g2 = NULL;
3486
Sathya Perlaca34fe32012-11-06 17:48:56 +00003487 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003488 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3489
3490 for (i = 0; i < MAX_FLASH_COMP; i++) {
3491 if (fsec_g2)
3492 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3493 else
3494 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3495
3496 if (img_type == type)
3497 return true;
3498 }
3499 return false;
3500
3501}
3502
Jingoo Han4188e7d2013-08-05 18:02:02 +09003503static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003504 int header_size,
3505 const struct firmware *fw)
3506{
3507 struct flash_section_info *fsec = NULL;
3508 const u8 *p = fw->data;
3509
3510 p += header_size;
3511 while (p < (fw->data + fw->size)) {
3512 fsec = (struct flash_section_info *)p;
3513 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3514 return fsec;
3515 p += 32;
3516 }
3517 return NULL;
3518}
3519
/* Write one image to the flash ROM in 32KB chunks via FW mailbox commands.
 *
 * Each chunk is copied into the pre-allocated DMA-able request buffer in
 * @flash_cmd and sent with a SAVE opcode; the final chunk uses the FLASH
 * (commit) opcode so the FW programs the accumulated data.  PHY firmware
 * uses its own SAVE/FLASH opcode pair.
 *
 * Returns 0 on success.  An ILLEGAL_IOCTL_REQ status for PHY firmware is
 * treated as "not supported" and silently ends the loop; any other error
 * is logged and returned.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
			struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk: commit (FLASH); earlier chunks: accumulate (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			/* PHY flashing unsupported by this FW: not fatal */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3560
/* For BE2, BE3 and BE3-R */
/* Flash every applicable image from a gen2/gen3 UFI file.
 *
 * The gen2/gen3 tables map each image type to its fixed flash offset,
 * maximum size, and FW optype.  For each table entry that is present in
 * the UFI's section directory, the corresponding image is extracted from
 * fw->data and written via be_flash().  Special cases: NCSI firmware is
 * skipped on FW older than 3.102.148.0, PHY firmware only when the PHY
 * needs it, and the boot code only when its CRC differs from flash.
 *
 * Returns 0 on success, -1 on a corrupt UFI or out-of-bounds image, or
 * the be_flash() error status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* {flash offset, FW optype, max size, UFI image type} per image */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* boot code: flash only when its CRC differs from flash */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds check: image must lie entirely within the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3670
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003671static int be_flash_skyhawk(struct be_adapter *adapter,
3672 const struct firmware *fw,
3673 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003674{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003675 int status = 0, i, filehdr_size = 0;
3676 int img_offset, img_size, img_optype, redboot;
3677 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3678 const u8 *p = fw->data;
3679 struct flash_section_info *fsec = NULL;
3680
3681 filehdr_size = sizeof(struct flash_file_hdr_g3);
3682 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3683 if (!fsec) {
3684 dev_err(&adapter->pdev->dev,
3685 "Invalid Cookie. UFI corrupted ?\n");
3686 return -1;
3687 }
3688
3689 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3690 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3691 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3692
3693 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3694 case IMAGE_FIRMWARE_iSCSI:
3695 img_optype = OPTYPE_ISCSI_ACTIVE;
3696 break;
3697 case IMAGE_BOOT_CODE:
3698 img_optype = OPTYPE_REDBOOT;
3699 break;
3700 case IMAGE_OPTION_ROM_ISCSI:
3701 img_optype = OPTYPE_BIOS;
3702 break;
3703 case IMAGE_OPTION_ROM_PXE:
3704 img_optype = OPTYPE_PXE_BIOS;
3705 break;
3706 case IMAGE_OPTION_ROM_FCoE:
3707 img_optype = OPTYPE_FCOE_BIOS;
3708 break;
3709 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3710 img_optype = OPTYPE_ISCSI_BACKUP;
3711 break;
3712 case IMAGE_NCSI:
3713 img_optype = OPTYPE_NCSI_FW;
3714 break;
3715 default:
3716 continue;
3717 }
3718
3719 if (img_optype == OPTYPE_REDBOOT) {
3720 redboot = be_flash_redboot(adapter, fw->data,
3721 img_offset, img_size,
3722 filehdr_size + img_hdrs_size);
3723 if (!redboot)
3724 continue;
3725 }
3726
3727 p = fw->data;
3728 p += filehdr_size + img_offset + img_hdrs_size;
3729 if (p + img_size > fw->data + fw->size)
3730 return -1;
3731
3732 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3733 if (status) {
3734 dev_err(&adapter->pdev->dev,
3735 "Flashing section type %d failed.\n",
3736 fsec->fsec_entry[i].type);
3737 return status;
3738 }
3739 }
3740 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003741}
3742
/* Download a firmware image to a Lancer chip.
 *
 * The image (which must be 4-byte aligned in length) is streamed to the
 * FW's "/prg" object in 32KB chunks via lancer_cmd_write_object(), then
 * committed with a zero-length write.  Depending on the FW's reported
 * change_status, the adapter is either reset in place to activate the
 * new image or the user is told a reboot is required.
 *
 * Returns 0 on success or a negative/FW error status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the request header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may consume fewer bytes than offered; advance by what
		 * it actually accepted */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3840
Sathya Perlaca34fe32012-11-06 17:48:56 +00003841#define UFI_TYPE2 2
3842#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003843#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003844#define UFI_TYPE4 4
3845static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003846 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003847{
3848 if (fhdr == NULL)
3849 goto be_get_ufi_exit;
3850
Sathya Perlaca34fe32012-11-06 17:48:56 +00003851 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3852 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003853 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3854 if (fhdr->asic_type_rev == 0x10)
3855 return UFI_TYPE3R;
3856 else
3857 return UFI_TYPE3;
3858 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003859 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003860
3861be_get_ufi_exit:
3862 dev_err(&adapter->pdev->dev,
3863 "UFI and Interface are not compatible for flashing\n");
3864 return -1;
3865}
3866
/* Flash a (non-Lancer) UFI firmware file.
 *
 * Allocates one DMA-able write_flashrom request buffer, classifies the
 * UFI via be_get_ufi_type(), then dispatches each image to the matching
 * per-generation flashing routine (be_flash_skyhawk / be_flash_BEx).
 * A TYPE3 UFI is refused on BE3-R (asic_rev >= 0x10) hardware; TYPE2
 * UFIs carry no per-image headers and are flashed in one pass.
 *
 * Returns 0 on success or a negative error status.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Walk the per-image headers; imageid == 1 marks a flashable image */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3935
3936int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3937{
3938 const struct firmware *fw;
3939 int status;
3940
3941 if (!netif_running(adapter->netdev)) {
3942 dev_err(&adapter->pdev->dev,
3943 "Firmware load not allowed (interface is down)\n");
3944 return -1;
3945 }
3946
3947 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3948 if (status)
3949 goto fw_exit;
3950
3951 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3952
3953 if (lancer_chip(adapter))
3954 status = lancer_fw_download(adapter, fw);
3955 else
3956 status = be_fw_download(adapter, fw);
3957
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003958 if (!status)
3959 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3960 adapter->fw_on_flash);
3961
Ajit Khaparde84517482009-09-04 03:12:16 +00003962fw_exit:
3963 release_firmware(fw);
3964 return status;
3965}
3966
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003967static int be_ndo_bridge_setlink(struct net_device *dev,
3968 struct nlmsghdr *nlh)
3969{
3970 struct be_adapter *adapter = netdev_priv(dev);
3971 struct nlattr *attr, *br_spec;
3972 int rem;
3973 int status = 0;
3974 u16 mode = 0;
3975
3976 if (!sriov_enabled(adapter))
3977 return -EOPNOTSUPP;
3978
3979 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3980
3981 nla_for_each_nested(attr, br_spec, rem) {
3982 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3983 continue;
3984
3985 mode = nla_get_u16(attr);
3986 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3987 return -EINVAL;
3988
3989 status = be_cmd_set_hsw_config(adapter, 0, 0,
3990 adapter->if_handle,
3991 mode == BRIDGE_MODE_VEPA ?
3992 PORT_FWD_TYPE_VEPA :
3993 PORT_FWD_TYPE_VEB);
3994 if (status)
3995 goto err;
3996
3997 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3998 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3999
4000 return status;
4001 }
4002err:
4003 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4004 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4005
4006 return status;
4007}
4008
/* ndo_bridge_getlink: report the eSwitch port-forwarding mode.
 *
 * BE and Lancer chips only support VEB, so it is reported directly;
 * other chips (Skyhawk) query the mode from FW.  Without SR-IOV, or on
 * a FW query failure, nothing is reported (returns 0).
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				    struct net_device *dev,
				    u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
4034
/* net_device_ops for all be2net interfaces: standard open/stop/xmit and
 * address/VLAN handling, SR-IOV per-VF controls, optional netpoll and
 * busy-poll hooks, and the eSwitch bridge get/set link callbacks.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
4059
4060static void be_netdev_init(struct net_device *netdev)
4061{
4062 struct be_adapter *adapter = netdev_priv(netdev);
4063
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004064 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004065 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004066 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004067 if (be_multi_rxq(adapter))
4068 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004069
4070 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004071 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004072
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004073 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004074 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004075
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004076 netdev->priv_flags |= IFF_UNICAST_FLT;
4077
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004078 netdev->flags |= IFF_MULTICAST;
4079
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004080 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004081
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004082 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004083
4084 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004085}
4086
4087static void be_unmap_pci_bars(struct be_adapter *adapter)
4088{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004089 if (adapter->csr)
4090 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004091 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004092 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004093}
4094
Sathya Perlace66f782012-11-06 17:48:58 +00004095static int db_bar(struct be_adapter *adapter)
4096{
4097 if (lancer_chip(adapter) || !be_physfn(adapter))
4098 return 0;
4099 else
4100 return 4;
4101}
4102
4103static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004104{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004105 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004106 adapter->roce_db.size = 4096;
4107 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4108 db_bar(adapter));
4109 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4110 db_bar(adapter));
4111 }
Parav Pandit045508a2012-03-26 14:27:13 +00004112 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004113}
4114
4115static int be_map_pci_bars(struct be_adapter *adapter)
4116{
4117 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004118
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004119 if (BEx_chip(adapter) && be_physfn(adapter)) {
4120 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4121 if (adapter->csr == NULL)
4122 return -ENOMEM;
4123 }
4124
Sathya Perlace66f782012-11-06 17:48:58 +00004125 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004126 if (addr == NULL)
4127 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004128 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004129
4130 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004131 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004132
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004133pci_map_err:
4134 be_unmap_pci_bars(adapter);
4135 return -ENOMEM;
4136}
4137
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004138static void be_ctrl_cleanup(struct be_adapter *adapter)
4139{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004140 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004141
4142 be_unmap_pci_bars(adapter);
4143
4144 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004145 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4146 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004147
Sathya Perla5b8821b2011-08-02 19:57:44 +00004148 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004149 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004150 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4151 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004152}
4153
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004154static int be_ctrl_init(struct be_adapter *adapter)
4155{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004156 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4157 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004158 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004159 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004160 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004161
Sathya Perlace66f782012-11-06 17:48:58 +00004162 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4163 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4164 SLI_INTF_FAMILY_SHIFT;
4165 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4166
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004167 status = be_map_pci_bars(adapter);
4168 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004169 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004170
4171 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004172 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4173 mbox_mem_alloc->size,
4174 &mbox_mem_alloc->dma,
4175 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004176 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004177 status = -ENOMEM;
4178 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004179 }
4180 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4181 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4182 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4183 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004184
Sathya Perla5b8821b2011-08-02 19:57:44 +00004185 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa82013-08-26 22:45:23 -07004186 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4187 rx_filter->size, &rx_filter->dma,
4188 GFP_KERNEL);
Sathya Perla5b8821b2011-08-02 19:57:44 +00004189 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004190 status = -ENOMEM;
4191 goto free_mbox;
4192 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004193
Ivan Vecera29849612010-12-14 05:43:19 +00004194 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004195 spin_lock_init(&adapter->mcc_lock);
4196 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004197
Suresh Reddy5eeff632014-01-06 13:02:24 +05304198 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004199 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004200 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004201
4202free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004203 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4204 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004205
4206unmap_pci_bars:
4207 be_unmap_pci_bars(adapter);
4208
4209done:
4210 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004211}
4212
4213static void be_stats_cleanup(struct be_adapter *adapter)
4214{
Sathya Perla3abcded2010-10-03 22:12:27 -07004215 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004216
4217 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004218 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4219 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004220}
4221
4222static int be_stats_init(struct be_adapter *adapter)
4223{
Sathya Perla3abcded2010-10-03 22:12:27 -07004224 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004225
Sathya Perlaca34fe32012-11-06 17:48:56 +00004226 if (lancer_chip(adapter))
4227 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4228 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004229 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004230 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004231 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004232 else
4233 /* ALL non-BE ASICs */
4234 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004235
Joe Perchesede23fa82013-08-26 22:45:23 -07004236 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4237 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004238 if (cmd->va == NULL)
4239 return -1;
4240 return 0;
4241}
4242
Bill Pemberton3bc6b062012-12-03 09:23:09 -05004243static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004244{
4245 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004246
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004247 if (!adapter)
4248 return;
4249
Parav Pandit045508a2012-03-26 14:27:13 +00004250 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004251 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00004252
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004253 cancel_delayed_work_sync(&adapter->func_recovery_work);
4254
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004255 unregister_netdev(adapter->netdev);
4256
Sathya Perla5fb379e2009-06-18 00:02:59 +00004257 be_clear(adapter);
4258
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004259 /* tell fw we're done with firing cmds */
4260 be_cmd_fw_clean(adapter);
4261
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004262 be_stats_cleanup(adapter);
4263
4264 be_ctrl_cleanup(adapter);
4265
Sathya Perlad6b6d982012-09-05 01:56:48 +00004266 pci_disable_pcie_error_reporting(pdev);
4267
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004268 pci_release_regions(pdev);
4269 pci_disable_device(pdev);
4270
4271 free_netdev(adapter->netdev);
4272}
4273
Sathya Perla39f1d942012-05-08 19:41:24 +00004274static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004275{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304276 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004277
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004278 status = be_cmd_get_cntl_attributes(adapter);
4279 if (status)
4280 return status;
4281
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004282 /* Must be a power of 2 or else MODULO will BUG_ON */
4283 adapter->be_get_temp_freq = 64;
4284
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304285 if (BEx_chip(adapter)) {
4286 level = be_cmd_get_fw_log_level(adapter);
4287 adapter->msg_enable =
4288 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4289 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004290
Sathya Perla92bf14a2013-08-27 16:57:32 +05304291 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004292 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004293}
4294
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004295static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004296{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004297 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004298 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004299
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004300 status = lancer_test_and_set_rdy_state(adapter);
4301 if (status)
4302 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004303
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004304 if (netif_running(adapter->netdev))
4305 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004306
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004307 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004308
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004309 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004310
4311 status = be_setup(adapter);
4312 if (status)
4313 goto err;
4314
4315 if (netif_running(adapter->netdev)) {
4316 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004317 if (status)
4318 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004319 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004320
Somnath Kotur4bebb562013-12-05 12:07:55 +05304321 dev_err(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004322 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004323err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004324 if (status == -EAGAIN)
4325 dev_err(dev, "Waiting for resource provisioning\n");
4326 else
Somnath Kotur4bebb562013-12-05 12:07:55 +05304327 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004328
4329 return status;
4330}
4331
/* Periodic (1s) worker that detects adapter errors and, on Lancer chips,
 * drives recovery: detach the netdev, run lancer_recover_func(), and
 * re-attach on success. Reschedules itself unless recovery failed with
 * an error other than -EAGAIN.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* rtnl_lock serializes the detach against netdev ops */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4358
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes FW statistics, polls die temperature on
 * PFs, replenishes starved RX queues and updates EQ delay (interrupt
 * moderation). Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only issue a new stats request once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq iterations (PF only) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4401
Sathya Perla257a3fe2013-06-14 15:54:51 +05304402/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004403static bool be_reset_required(struct be_adapter *adapter)
4404{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304405 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004406}
4407
Sathya Perlad3791422012-09-28 04:39:44 +00004408static char *mc_name(struct be_adapter *adapter)
4409{
4410 if (adapter->function_mode & FLEX10_MODE)
4411 return "FLEX10";
4412 else if (adapter->function_mode & VNIC_MODE)
4413 return "vNIC";
4414 else if (adapter->function_mode & UMC_ENABLED)
4415 return "UMC";
4416 else
4417 return "";
4418}
4419
/* "PF" or "VF" depending on the function type; used for the probe banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4424
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004425static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004426{
4427 int status = 0;
4428 struct be_adapter *adapter;
4429 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004430 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004431
4432 status = pci_enable_device(pdev);
4433 if (status)
4434 goto do_none;
4435
4436 status = pci_request_regions(pdev, DRV_NAME);
4437 if (status)
4438 goto disable_dev;
4439 pci_set_master(pdev);
4440
Sathya Perla7f640062012-06-05 19:37:20 +00004441 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004442 if (netdev == NULL) {
4443 status = -ENOMEM;
4444 goto rel_reg;
4445 }
4446 adapter = netdev_priv(netdev);
4447 adapter->pdev = pdev;
4448 pci_set_drvdata(pdev, adapter);
4449 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004450 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004451
Russell King4c15c242013-06-26 23:49:11 +01004452 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004453 if (!status) {
4454 netdev->features |= NETIF_F_HIGHDMA;
4455 } else {
Russell King4c15c242013-06-26 23:49:11 +01004456 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004457 if (status) {
4458 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4459 goto free_netdev;
4460 }
4461 }
4462
Ajit Khapardeea58c182013-10-18 16:06:24 -05004463 if (be_physfn(adapter)) {
4464 status = pci_enable_pcie_error_reporting(pdev);
4465 if (!status)
4466 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4467 }
Sathya Perlad6b6d982012-09-05 01:56:48 +00004468
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004469 status = be_ctrl_init(adapter);
4470 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004471 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004472
Sathya Perla2243e2e2009-11-22 22:02:03 +00004473 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004474 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004475 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004476 if (status)
4477 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004478 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004479
Sathya Perla39f1d942012-05-08 19:41:24 +00004480 if (be_reset_required(adapter)) {
4481 status = be_cmd_reset_function(adapter);
4482 if (status)
4483 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004484
Kalesh AP2d177be2013-04-28 22:22:29 +00004485 /* Wait for interrupts to quiesce after an FLR */
4486 msleep(100);
4487 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004488
4489 /* Allow interrupts for other ULPs running on NIC function */
4490 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004491
Kalesh AP2d177be2013-04-28 22:22:29 +00004492 /* tell fw we're ready to fire cmds */
4493 status = be_cmd_fw_init(adapter);
4494 if (status)
4495 goto ctrl_clean;
4496
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004497 status = be_stats_init(adapter);
4498 if (status)
4499 goto ctrl_clean;
4500
Sathya Perla39f1d942012-05-08 19:41:24 +00004501 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004502 if (status)
4503 goto stats_clean;
4504
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004505 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004506 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004507 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004508
Sathya Perla5fb379e2009-06-18 00:02:59 +00004509 status = be_setup(adapter);
4510 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004511 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004512
Sathya Perla3abcded2010-10-03 22:12:27 -07004513 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004514 status = register_netdev(netdev);
4515 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004516 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004517
Parav Pandit045508a2012-03-26 14:27:13 +00004518 be_roce_dev_add(adapter);
4519
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004520 schedule_delayed_work(&adapter->func_recovery_work,
4521 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004522
4523 be_cmd_query_port_name(adapter, &port_name);
4524
Sathya Perlad3791422012-09-28 04:39:44 +00004525 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4526 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004527
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004528 return 0;
4529
Sathya Perla5fb379e2009-06-18 00:02:59 +00004530unsetup:
4531 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004532stats_clean:
4533 be_stats_cleanup(adapter);
4534ctrl_clean:
4535 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004536free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004537 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004538rel_reg:
4539 pci_release_regions(pdev);
4540disable_dev:
4541 pci_disable_device(pdev);
4542do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004543 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004544 return status;
4545}
4546
/* PM suspend handler: arm wake-on-LAN if enabled, quiesce interrupts and
 * the recovery worker, close and clear the adapter, then power the PCI
 * device down. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* rtnl_lock serializes be_close() against netdev ops */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4571
/* PM resume handler: re-enable the PCI device, wait for FW readiness,
 * redo the FW handshake and adapter setup (mirroring be_suspend), then
 * restart the recovery worker and re-attach the netdev.
 * Returns 0 on success or a negative errno from an early setup step.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is not checked here,
	 * unlike in be_probe() — presumably best-effort on resume.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4613
Sathya Perla82456b02010-02-17 01:35:37 +00004614/*
4615 * An FLR will stop BE from DMAing any data.
4616 */
4617static void be_shutdown(struct pci_dev *pdev)
4618{
4619 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004620
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004621 if (!adapter)
4622 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004623
Sathya Perla0f4a6822011-03-21 20:49:28 +00004624 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004625 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004626
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004627 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004628
Ajit Khaparde57841862011-04-06 18:08:43 +00004629 be_cmd_reset_function(adapter);
4630
Sathya Perla82456b02010-02-17 01:35:37 +00004631 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004632}
4633
Sathya Perlacf588472010-02-14 21:22:01 +00004634static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4635 pci_channel_state_t state)
4636{
4637 struct be_adapter *adapter = pci_get_drvdata(pdev);
4638 struct net_device *netdev = adapter->netdev;
4639
4640 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4641
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004642 if (!adapter->eeh_error) {
4643 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004644
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004645 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004646
Sathya Perlacf588472010-02-14 21:22:01 +00004647 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004648 netif_device_detach(netdev);
4649 if (netif_running(netdev))
4650 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004651 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004652
4653 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004654 }
Sathya Perlacf588472010-02-14 21:22:01 +00004655
4656 if (state == pci_channel_io_perm_failure)
4657 return PCI_ERS_RESULT_DISCONNECT;
4658
4659 pci_disable_device(pdev);
4660
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004661 /* The error could cause the FW to trigger a flash debug dump.
4662 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004663 * can cause it not to recover; wait for it to finish.
4664 * Wait only for first function as it is needed only once per
4665 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004666 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004667 if (pdev->devfn == 0)
4668 ssleep(30);
4669
Sathya Perlacf588472010-02-14 21:22:01 +00004670 return PCI_ERS_RESULT_NEED_RESET;
4671}
4672
/* EEH/AER slot_reset callback: re-enable the device, restore PCI state,
 * and wait for the FW to become ready again. Returns RECOVERED on success
 * (after clearing AER and driver error state), DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4699
/* EEH/AER resume callback: after a successful slot reset, redo the FW
 * handshake and adapter setup, re-open the netdev if it was running, and
 * restart the recovery worker. Logs and bails out on any failure.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4736
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4742
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004743static struct pci_driver be_driver = {
4744 .name = DRV_NAME,
4745 .id_table = be_dev_ids,
4746 .probe = be_probe,
4747 .remove = be_remove,
4748 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004749 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004750 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004751 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004752};
4753
4754static int __init be_init_module(void)
4755{
Joe Perches8e95a202009-12-03 07:58:21 +00004756 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4757 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004758 printk(KERN_WARNING DRV_NAME
4759 " : Module param rx_frag_size must be 2048/4096/8192."
4760 " Using 2048\n");
4761 rx_frag_size = 2048;
4762 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004763
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004764 return pci_register_driver(&be_driver);
4765}
4766module_init(be_init_module);
4767
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);