blob: 34644969a4be07eae7b973451a164e4c0b9dd181 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla11ac75e2011-12-13 00:58:50 +000037static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Sathya Perla6b7c5b92009-03-11 23:32:03 -070041static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Block names reported in the UE (unrecoverable error) status-low CSR.
 * Presumably entry i labels bit i of the register -- TODO confirm at the
 * site that decodes UE status. Trailing spaces in some entries are
 * intentional and preserved as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Block names reported in the UE status-high CSR; companion table to
 * ue_status_low_desc. The trailing "Unknown" entries pad the table to
 * 32 entries for bits with no documented source.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000128 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 mem->va = NULL;
132 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 return 0;
149}
150
Somnath Kotur68c45a22013-03-14 02:42:07 +0000151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168}
169
Somnath Kotur68c45a22013-03-14 02:42:07 +0000170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_RQ_RING_ID_MASK;
190 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000196static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198{
199 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000202
203 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205}
206
Sathya Perla8788fdc2009-07-27 22:52:03 +0000207static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208 bool arm, bool clear_int, u16 num_popped)
209{
210 u32 val = 0;
211 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000212 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
213 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* ndo_set_mac_address handler: program a new MAC address on the
 * interface. The new MAC filter is added first, the old one deleted on
 * success, and the FW is then queried for the currently-active MAC to
 * decide whether the change actually took effect before committing it
 * to netdev->dev_addr.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * when the FW did not activate the requested MAC, or a FW error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy a v0 GET_STATS response (BE2 chips) into adapter->drv_stats.
 * The response buffer is little-endian and is byte-swapped in place
 * before the per-port and rx-filter counters are copied out.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits filtered frames into address- and vlan-filtered;
	 * the driver stat is the sum of both
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 reports jabber events per port at the rx-filter level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy a v1 GET_STATS response (BE3 chips) into adapter->drv_stats.
 * Same pattern as populate_be_v0_stats but with the v1 layout, which
 * adds priority-pause and fifo-overflow counters.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy a v2 GET_STATS response (chips newer than BE3) into
 * adapter->drv_stats. Same pattern as the v1 variant; v2 additionally
 * carries RoCE traffic counters, copied only on RoCE-capable adapters.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		/* RoCE (RDMA over Ethernet) counters, v2-only */
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy a Lancer per-physical-port stats response into
 * adapter->drv_stats. Lancer uses its own pport stats layout; the
 * "_lo" fields are the low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* response buffer is little-endian; swap in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* rx_fifo_overflow feeds both driver fifo-drop stats */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filtered counts are summed, as in v0 */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000527
Sathya Perla09c1c682011-08-22 19:41:53 +0000528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
Jingoo Han4188e7d2013-08-05 18:02:02 +0900540static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000554void be_parse_stats(struct be_adapter *adapter)
555{
Ajit Khaparde61000862013-10-03 16:16:33 -0500556 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000557 struct be_rx_obj *rxo;
558 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000560
Sathya Perlaca34fe32012-11-06 17:48:56 +0000561 if (lancer_chip(adapter)) {
562 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000563 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000564 if (BE2_chip(adapter))
565 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500566 else if (BE3_chip(adapter))
567 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000568 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500569 else
570 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000571
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000573 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000574 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000576 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000577 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000578}
579
/* ndo_get_stats64 handler: aggregate per-queue SW packet/byte counters
 * and the FW-derived drv_stats error counters into @stats.
 * Per-queue counters are sampled under their u64_stats seqcount
 * (begin/retry loop) so 64-bit values read consistently on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648 struct net_device *netdev = adapter->netdev;
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000651 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659}
660
Sathya Perla3c8def92011-06-12 20:01:58 +0000661static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663{
Sathya Perla3c8def92011-06-12 20:01:58 +0000664 struct be_tx_stats *stats = tx_stats(txo);
665
Sathya Perlaab1594e2011-07-25 19:10:15 +0000666 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 /* to account for hdr wrb */
685 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000702 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703}
704
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
/* Populate the per-packet Tx header WRB: CRC, LSO/checksum offload flags,
 * optional HW VLAN insertion, event/completion bits, and the total WRB
 * count and payload length for this transmit request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is a BEx-only bit; Lancer does not need it */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request L4 checksum offload for TCP/UDP */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		/* have the HW insert the (possibly priority-remapped) tag */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
755
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000764 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000765 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000770 }
771}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772
/* DMA-map the skb (linear part plus page frags) and post one WRB per
 * mapped region into the Tx queue, preceded by a header WRB and followed
 * by an optional dummy WRB. Returns the number of payload bytes queued,
 * or 0 if any DMA mapping failed (all mappings and the queue head are
 * rolled back in that case).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* remember the first data-WRB slot for rollback on DMA error */
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		/* linear (head) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length filler WRB to keep the WRB count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first data WRB and unmap everything mapped so far;
	 * only the first WRB can be a dma_map_single() mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
839
/* Insert the VLAN tag (and outer QnQ tag, if configured) into the packet
 * data in software, as a workaround for HW tagging issues. May set
 * *skip_hw_vlan to tell the WRB header to suppress HW VLAN insertion.
 * Returns the (possibly reallocated) skb, or NULL if the skb was freed
 * on allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* the packet data is about to be modified; unshare it first */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged traffic inherits the port VLAN id */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		/* __vlan_put_tag frees the skb and returns NULL on failure */
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the packet data; clear the out-of-band tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
882
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
Sathya Perlaee9c7992013-05-22 23:04:55 +0000910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000914}
915
/* Apply the chain of chip-specific Tx workarounds (short-packet padding,
 * padded-packet trimming, manual VLAN insertion, ipv6 ext-hdr stall
 * avoidance) before the skb is posted to the hardware. Returns the
 * (possibly reallocated) skb, or NULL if the packet had to be dropped
 * (the skb is freed in that case).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	/* trim the skb back to the IP datagram's true length */
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
992
/* ndo_start_xmit handler: apply Tx workarounds, build the WRB chain,
 * stop the subqueue if it is about to fill, and ring the Tx doorbell.
 * Always returns NETDEV_TX_OK; un-transmittable skbs are dropped and
 * counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	/* remember the header-WRB slot: used to index sent_skb_list and to
	 * rewind the queue if WRB creation fails
	 */
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1041
1042static int be_change_mtu(struct net_device *netdev, int new_mtu)
1043{
1044 struct be_adapter *adapter = netdev_priv(netdev);
1045 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001046 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1047 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048 dev_info(&adapter->pdev->dev,
1049 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001050 BE_MIN_MTU,
1051 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 return -EINVAL;
1053 }
1054 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1055 netdev->mtu, new_mtu);
1056 netdev->mtu = new_mtu;
1057 return 0;
1058}
1059
1060/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001061 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1062 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001063 */
/* Program the HW VLAN filter table from adapter->vlan_tag[]. Falls back
 * to VLAN promiscuous mode when the filter cannot hold all configured
 * VIDs (or the config command reports insufficient resources), and
 * recovers from promiscuous mode once filtering succeeds again.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1119
Patrick McHardy80d5c362013-04-19 02:04:28 +00001120static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001121{
1122 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001123 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001124
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001125 /* Packets with VID 0 are always received by Lancer by default */
1126 if (lancer_chip(adapter) && vid == 0)
1127 goto ret;
1128
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301130 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001131
Somnath Kotura6b74e02014-01-21 15:50:55 +05301132 status = be_vid_config(adapter);
1133 if (status) {
1134 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001135 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301136 }
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001137ret:
1138 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001139}
1140
Patrick McHardy80d5c362013-04-19 02:04:28 +00001141static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142{
1143 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001144 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001146 /* Packets with VID 0 are always received by Lancer by default */
1147 if (lancer_chip(adapter) && vid == 0)
1148 goto ret;
1149
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001150 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301151 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001152 if (!status)
1153 adapter->vlans_added--;
1154 else
1155 adapter->vlan_tag[vid] = 1;
1156ret:
1157 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001158}
1159
/* ndo_set_rx_mode handler: program the interface's promiscuous,
 * multicast, and unicast-MAC filters to match the net_device state,
 * falling back to (mcast) promiscuous mode when HW filter capacity
 * is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filter dropped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all secondary unicast MACs, then re-add below */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1221
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001222static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1223{
1224 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001225 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001226 int status;
1227
Sathya Perla11ac75e2011-12-13 00:58:50 +00001228 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001229 return -EPERM;
1230
Sathya Perla11ac75e2011-12-13 00:58:50 +00001231 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001232 return -EINVAL;
1233
Sathya Perla3175d8c2013-07-23 15:25:03 +05301234 if (BEx_chip(adapter)) {
1235 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1236 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001237
Sathya Perla11ac75e2011-12-13 00:58:50 +00001238 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1239 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301240 } else {
1241 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1242 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001243 }
1244
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001245 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001246 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1247 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001248 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001249 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001250
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001251 return status;
1252}
1253
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001254static int be_get_vf_config(struct net_device *netdev, int vf,
1255 struct ifla_vf_info *vi)
1256{
1257 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001258 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001259
Sathya Perla11ac75e2011-12-13 00:58:50 +00001260 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001261 return -EPERM;
1262
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001264 return -EINVAL;
1265
1266 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001268 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1269 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001271
1272 return 0;
1273}
1274
/* ndo_set_vf_vlan handler: program (or clear) transparent VLAN tagging
 * for a VF via the HSW config command. vlan==0 && qos==0 resets tagging
 * back to the VF's default VID.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* pack priority into the tag before comparing/programming */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1310
Ajit Khapardee1d18732010-07-23 01:52:13 +00001311static int be_set_vf_tx_rate(struct net_device *netdev,
1312 int vf, int rate)
1313{
1314 struct be_adapter *adapter = netdev_priv(netdev);
1315 int status = 0;
1316
Sathya Perla11ac75e2011-12-13 00:58:50 +00001317 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001318 return -EPERM;
1319
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001320 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001321 return -EINVAL;
1322
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001323 if (rate < 100 || rate > 10000) {
1324 dev_err(&adapter->pdev->dev,
1325 "tx rate must be between 100 and 10000 Mbps\n");
1326 return -EINVAL;
1327 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001328
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001329 if (lancer_chip(adapter))
1330 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1331 else
1332 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001333
1334 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001335 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001336 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001337 else
1338 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001339 return status;
1340}
1341
Sathya Perla2632baf2013-10-01 16:00:00 +05301342static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1343 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344{
Sathya Perla2632baf2013-10-01 16:00:00 +05301345 aic->rx_pkts_prev = rx_pkts;
1346 aic->tx_reqs_prev = tx_pkts;
1347 aic->jiffies = now;
1348}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001349
/* Adaptive interrupt coalescing (AIC): recompute each event queue's
 * interrupt delay (EQD) from the rx/tx packet rate observed since the last
 * sample, clamp it to the queue's [min_eqd, max_eqd] range, and push all
 * changed delays to the device in one batched firmware command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off for this queue: use the statically
			 * configured delay and invalidate the sample time.
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Snapshot the rx packet counter consistently (u64_stats
		 * retry loop protects 64-bit reads on 32-bit hosts).
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		/* Same for the tx request counter */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second since the last sample */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* low traffic: disable the delay */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Only queue a device update when the delay changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1416
Sathya Perla3abcded2010-10-03 22:12:27 -07001417static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001418 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001419{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001420 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001421
Sathya Perlaab1594e2011-07-25 19:10:15 +00001422 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001423 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001424 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001425 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001426 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001427 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001428 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001429 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001430 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001431}
1432
Sathya Perla2e588f82011-03-11 02:49:26 +00001433static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001434{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001435 /* L4 checksum is not reliable for non TCP/UDP packets.
1436 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001437 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1438 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001439}
1440
/* Pop the rx page-info entry at the RXQ tail and make its data visible to
 * the CPU.  The entry marked last_frag owns the DMA mapping for the whole
 * big (compound) page and is fully unmapped; every other fragment only
 * needs a CPU-sync of its rx_frag_size window.  Consumes one RXQ entry
 * (tail advances, used count drops).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Final fragment of the page: tear down the page mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1466
1467/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001468static void be_rx_compl_discard(struct be_rx_obj *rxo,
1469 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001472 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001474 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301475 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001476 put_page(page_info->page);
1477 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 }
1479}
1480
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny frames (<= BE_HDR_LEN) are copied wholly into the skb's linear
 * area; larger frames get only the Ethernet header copied and the rest
 * attached as page fragments.  Consecutive fragments that live in the
 * same physical page are coalesced into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header goes linear, payload stays in the page frag */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or released) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1554
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the posted rx fragments, sets checksum /
 * rx-queue / rss-hash / vlan metadata, and hands it to the network stack.
 * On skb allocation failure the frame's fragments are discarded and a
 * drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only when rx checksum offload is on
	 * and the completion flags say it is valid (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1588
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Uses napi's pre-allocated frag skb: attaches all fragment pages
 * directly (no header copy), coalescing same-page fragments into one
 * frag slot, then submits the skb for GRO.  GRO implies the HW has
 * validated the checksum, hence CHECKSUM_UNNECESSARY.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;	/* i: rx fragment index; j: skb frag slot (starts at -1) */

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1644
/* Decode a v1-format (BE3 native mode) rx completion entry into the
 * chip-independent be_rx_compl_info.  The vlan fields are only decoded
 * when the completion's vtp (vlan-tagged) flag is set.
 *
 * NOTE(review): unlike be_parse_rx_compl_v0(), ip_frag is not extracted
 * here, so rxcp->ip_frag keeps whatever value the reused rxo->rxcp buffer
 * held — confirm v1 completions never report ip fragments or that this is
 * handled elsewhere.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674
/* Decode a v0-format (legacy) rx completion entry into the
 * chip-independent be_rx_compl_info.  Mirrors be_parse_rx_compl_v1() but
 * additionally extracts the ip_frag flag present in v0 completions.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1706
/* Fetch the next valid rx completion from the CQ, or NULL if none.
 * Converts the raw entry to host endianness, parses it into the per-queue
 * rxo->rxcp scratch struct (v0 or v1 format depending on be3_native),
 * applies vlan post-processing, invalidates the CQ entry and advances the
 * tail.  The returned pointer is to rxo->rxcp, which is overwritten by the
 * next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry until the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum result is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port's pvid tag unless the host configured it */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1751
Eric Dumazet1829b082011-03-01 05:48:12 +00001752static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001755
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001756 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001757 gfp |= __GFP_COMP;
1758 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001759}
1760
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Each big page is DMA-mapped once and carved into rx_frag_size slices;
 * the page-info entry for the page's final fragment records the page-level
 * DMA address (last_frag) so get_rx_page_info() knows when to unmap the
 * whole page.  Posts at most MAX_RX_POST descriptors, stopping early if
 * allocation/mapping fails or the ring has no empty slots left.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next fragment of the current page; one page ref
			 * per posted fragment.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1839
Sathya Perla5fb379e2009-06-18 00:02:59 +00001840static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001841{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001842 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1843
1844 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1845 return NULL;
1846
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001847 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001848 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1849
1850 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1851
1852 queue_tail_inc(tx_cq);
1853 return txcp;
1854}
1855
/* Reclaim one completed tx request: walk the TXQ from its tail up to and
 * including last_index, unmapping each data WRB (the skb header mapping is
 * released together with the first data WRB), free the skb, and return the
 * number of WRBs consumed — including the header WRB the count starts at.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the linear header only once, with the first WRB */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1887
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001888/* Return the number of events in the event queue */
1889static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001890{
1891 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001892 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001893
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001894 do {
1895 eqe = queue_tail_node(&eqo->q);
1896 if (eqe->evt == 0)
1897 break;
1898
1899 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001900 eqe->evt = 0;
1901 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001902 queue_tail_inc(&eqo->q);
1903 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001904
1905 return num;
1906}
1907
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001908/* Leaves the EQ is disarmed state */
1909static void be_eq_clean(struct be_eq_obj *eqo)
1910{
1911 int num = events_get(eqo);
1912
1913 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1914}
1915
/* Drain the RX completion queue and release every RX buffer still posted
 * to the RX queue. Discards all pending completions and, on non-Lancer
 * chips, waits (up to ~10ms) for the HW flush completion before freeing
 * the unused buffer pages and resetting the ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10 retries or on a dead HW */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1964
/* Reclaim all posted TX skbs across every TX queue before teardown.
 * First polls the TX CQs for up to ~200ms so in-flight completions can
 * arrive and be processed normally; anything still posted after the
 * timeout is force-reclaimed (its completion will never come).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Batch the CQ notify and used-count update per txq */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's WRB span to find its last WRB */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2023
/* Tear down all event queues: drain and leave each EQ disarmed, destroy
 * it in HW, unregister its NAPI context, then free the ring memory.
 * Queue memory is freed even for EQs that were never created in HW.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2039
/* Create the event queues: size num_evt_qs from the available IRQs and
 * the configured queue count, register a NAPI context per EQ, set the
 * adaptive-interrupt-coalescing defaults, then allocate and create each
 * EQ in HW. Returns 0 on success or a negative errno; on failure,
 * already-created queues are left for the destroy path to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2073
Sathya Perla5fb379e2009-06-18 00:02:59 +00002074static void be_mcc_queues_destroy(struct be_adapter *adapter)
2075{
2076 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002077
Sathya Perla8788fdc2009-07-27 22:52:03 +00002078 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002079 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002080 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002081 be_queue_free(adapter, q);
2082
Sathya Perla8788fdc2009-07-27 22:52:03 +00002083 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002084 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002085 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002086 be_queue_free(adapter, q);
2087}
2088
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue (attached to the default EQ) and the
 * MCC WRB queue, unwinding in reverse order via the goto chain on any
 * failure. Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

/* Error unwind: undo the steps above in reverse order */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2121
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122static void be_tx_queues_destroy(struct be_adapter *adapter)
2123{
2124 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002125 struct be_tx_obj *txo;
2126 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002127
Sathya Perla3c8def92011-06-12 20:01:58 +00002128 for_all_tx_queues(adapter, txo, i) {
2129 q = &txo->q;
2130 if (q->created)
2131 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2132 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002133
Sathya Perla3c8def92011-06-12 20:01:58 +00002134 q = &txo->cq;
2135 if (q->created)
2136 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2137 be_queue_free(adapter, q);
2138 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002139}
2140
/* Create the TX queues: one TX queue + completion queue pair per event
 * queue (capped by the HW maximum). Returns 0 on success or a negative
 * status; on failure, already-created queues are left for the destroy
 * path to clean up.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2181
2182static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183{
2184 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002185 struct be_rx_obj *rxo;
2186 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002187
Sathya Perla3abcded2010-10-03 22:12:27 -07002188 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002189 q = &rxo->cq;
2190 if (q->created)
2191 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2192 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002193 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194}
2195
/* Create the RX completion queues: one RSS ring per EQ, plus one extra
 * default RXQ for non-IP traffic when more than one ring is available.
 * Also computes big_page_size from rx_frag_size. Returns 0 on success
 * or a negative status; partially created queues are left for the
 * destroy path to clean up.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs may share EQs when there are more CQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2232
/* Legacy INTx interrupt handler. Schedules NAPI for the EQ and tracks
 * spurious interrupts so persistently bad lines are reported as
 * IRQ_NONE (letting the kernel disable the shared line if needed).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* A non-zero event count proves the intr was genuine */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2264
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002265static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002266{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002267 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002268
Sathya Perla0b545a62012-11-23 00:27:18 +00002269 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2270 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271 return IRQ_HANDLED;
2272}
2273
Sathya Perla2e588f82011-03-11 02:49:26 +00002274static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002275{
Somnath Koture38b1702013-05-29 22:55:56 +00002276 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277}
2278
/* Process up to @budget RX completions on @rxo, delivering packets via
 * GRO or the regular path. @polling distinguishes NAPI polling from
 * busy-polling (GRO is skipped while busy-polling). Notifies the CQ for
 * the consumed entries and replenishes RX buffers when the queue runs
 * low. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2334
/* Process up to @budget TX completions on @txo (the TX queue with index
 * @idx), reclaiming the transmitted skbs and waking the corresponding
 * netdev subqueue once enough WRBs are free. Returns true when all
 * pending completions fit within the budget (i.e. TX work is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002367
/* NAPI poll handler for one EQ: services all TX queues mapped to this
 * EQ, then all its RX queues (unless busy-polling holds the lock), and
 * MCC completions for the EQ that owns the MCC queue. Re-arms the EQ
 * only when all work fit in @budget; otherwise stays in polling mode.
 * Returns the amount of work done (== @budget to keep polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX work forces a full-budget return */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-polling owns the RX queues; come back later */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2412
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll (low-latency sockets) handler for one EQ: tries to grab the
 * per-EQ busy-poll lock and, if successful, polls each RX queue on this
 * EQ (budget 4) until one yields packets. Returns the number of packets
 * processed, or LL_FLUSH_BUSY when NAPI currently owns the queues.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2434
/* Check the adapter's error registers and latch any detected HW error.
 * On Lancer chips the SLIPORT status/error registers are read (FW-reset
 * signatures are reported as informational, not errors); on other chips
 * the PCI-config UE (unrecoverable error) status registers are decoded
 * bit by bit. The carrier is turned off when a real error is found.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Nothing to do if an error has already been latched */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Ignore UE bits that are masked off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2510
Sathya Perla8d56ff12009-11-22 22:02:26 +00002511static void be_msix_disable(struct be_adapter *adapter)
2512{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002513 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002514 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002515 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302516 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002517 }
2518}
2519
/* Enable MSI-x for the adapter. Requests up to the computed number of
 * vectors (doubled when RoCE is supported, half of which are handed to
 * RoCE) and records the split in num_msix_vec/num_msix_roce_vec.
 * Returns 0 on success; on failure returns 0 for PFs (which can fall
 * back to INTx) or the pci_enable_msix_range() error for VFs (which
 * have no INTx and must fail the probe).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested (but >= MIN_MSIX_VECTORS) */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2563
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002564static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002565 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002566{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302567 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002568}
2569
2570static int be_msix_register(struct be_adapter *adapter)
2571{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002572 struct net_device *netdev = adapter->netdev;
2573 struct be_eq_obj *eqo;
2574 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002576 for_all_evt_queues(adapter, eqo, i) {
2577 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2578 vec = be_msix_vec_get(adapter, eqo);
2579 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002580 if (status)
2581 goto err_msix;
2582 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002583
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002584 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002585err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002586 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2587 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2588 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2589 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002590 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591 return status;
2592}
2593
/* Register the adapter's interrupt handler(s).
 *
 * Prefers MSI-X when vectors were enabled. If MSI-X registration fails,
 * a PF falls through to a single shared INTx line; a VF returns the
 * error, since INTx is not supported for VFs. Marks isr_registered on
 * success so be_irq_unregister() can be called safely.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2621
2622static void be_irq_unregister(struct be_adapter *adapter)
2623{
2624 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002625 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002626 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002627
2628 if (!adapter->isr_registered)
2629 return;
2630
2631 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002632 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002633 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002634 goto done;
2635 }
2636
2637 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002638 for_all_evt_queues(adapter, eqo, i)
2639 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002640
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002641done:
2642 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002643}
2644
/* Destroy all RX queues: for each RXQ that was created in FW, issue the
 * destroy command and drain its completion queue, then free the queue's
 * host memory (be_queue_free is called even for never-created queues).
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* flush buffers/completions left after FW destroy */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2660
/* ndo_stop handler: quiesce the interface.
 *
 * Teardown order is deliberate: stop RoCE and NAPI/busy-poll first, then
 * MCC async events, drain TX, destroy RX queues, delete extra uc-macs,
 * quiesce and clean each EQ, and finally release the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete programmed uc-macs; index 0 (the primary MAC) is kept */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no in-flight interrupt handler still references an EQ
	 * before cleaning it
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2704
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002705static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002706{
2707 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002708 int rc, i, j;
2709 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002710
2711 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002712 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2713 sizeof(struct be_eth_rx_d));
2714 if (rc)
2715 return rc;
2716 }
2717
2718 /* The FW would like the default RXQ to be created first */
2719 rxo = default_rxo(adapter);
2720 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2721 adapter->if_handle, false, &rxo->rss_id);
2722 if (rc)
2723 return rc;
2724
2725 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002726 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002727 rx_frag_size, adapter->if_handle,
2728 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002729 if (rc)
2730 return rc;
2731 }
2732
2733 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002734 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2735 for_all_rss_queues(adapter, rxo, i) {
2736 if ((j + i) >= 128)
2737 break;
2738 rsstable[j + i] = rxo->rss_id;
2739 }
2740 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002741 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2742 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2743
2744 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002751
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2753 128);
2754 if (rc) {
2755 adapter->rss_flags = RSS_ENABLE_NONE;
2756 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002757 }
2758
2759 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002760 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002761 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002762 return 0;
2763}
2764
/* ndo_open handler: bring the interface up.
 *
 * Creates RX queues, registers IRQs, arms RX/TX completion queues,
 * enables async MCC events and NAPI/busy-poll, queries link state and
 * starts the TX queues. Any failure rolls everything back via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: carrier state is updated only if the query succeeds */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2808
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002809static int be_setup_wol(struct be_adapter *adapter, bool enable)
2810{
2811 struct be_dma_mem cmd;
2812 int status = 0;
2813 u8 mac[ETH_ALEN];
2814
2815 memset(mac, 0, ETH_ALEN);
2816
2817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002818 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2819 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002820 if (cmd.va == NULL)
2821 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002822
2823 if (enable) {
2824 status = pci_write_config_dword(adapter->pdev,
2825 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2826 if (status) {
2827 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002828 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002829 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2830 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002831 return status;
2832 }
2833 status = be_cmd_enable_magic_wol(adapter,
2834 adapter->netdev->dev_addr, &cmd);
2835 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2836 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2837 } else {
2838 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2839 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2840 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2841 }
2842
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002843 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002844 return status;
2845}
2846
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 program the MAC as a pmac entry; newer chips use
		 * the dedicated set_mac command
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed + 1 in the last octet
		 * NOTE(review): wraps after 256 VFs without carrying into
		 * mac[4] — acceptable given MAX_VFS limits, but confirm
		 */
		mac[5] += 1;
	}
	return status;
}
2881
/* Read back the MAC already active on each VF's interface and cache it
 * in vf_cfg. Used when VFs were left enabled by a previous driver load,
 * so existing addresses must be preserved rather than reassigned.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2898
/* Undo VF provisioning: disable SR-IOV, delete each VF's MAC and destroy
 * its interface, then free the vf_cfg array.
 *
 * If any VF is still assigned to a VM, SR-IOV and the per-VF FW objects
 * are intentionally left intact; only the host-side state is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3: MAC lives in a pmac entry; newer chips clear it
		 * via set_mac with a NULL address
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2926
/* Destroy all queue objects: MCC queues, RX completion queues, TX queues
 * and, last, the event queues.
 * NOTE(review): call order preserved as-is; EQs appear to be torn down
 * last because the other queues complete into them — confirm before
 * reordering.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2934
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302935static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002936{
Sathya Perla191eb752012-02-23 18:50:13 +00002937 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2938 cancel_delayed_work_sync(&adapter->work);
2939 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2940 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302941}
2942
Somnath Koturb05004a2013-12-05 12:08:16 +05302943static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302944{
2945 int i;
2946
Somnath Koturb05004a2013-12-05 12:08:16 +05302947 if (adapter->pmac_id) {
2948 for (i = 0; i < (adapter->uc_macs + 1); i++)
2949 be_cmd_pmac_del(adapter, adapter->if_handle,
2950 adapter->pmac_id[i], 0);
2951 adapter->uc_macs = 0;
2952
2953 kfree(adapter->pmac_id);
2954 adapter->pmac_id = NULL;
2955 }
2956}
2957
/* Full teardown of the adapter's FW objects and host resources:
 * stop the worker, clear VFs, delete MACs, destroy the interface and
 * all queues, and release MSI-X vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2975
/* Create a FW interface object for each VF.
 *
 * Starts from a baseline of untagged/broadcast/multicast capabilities;
 * on non-BE3 chips the per-VF FW profile, when available, overrides the
 * capability flags. Returns 0 or the first be_cmd_if_create() error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* best-effort: keep baseline flags if query fails */
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3005
Sathya Perla39f1d942012-05-08 19:41:24 +00003006static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003007{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003008 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003009 int vf;
3010
Sathya Perla39f1d942012-05-08 19:41:24 +00003011 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3012 GFP_KERNEL);
3013 if (!adapter->vf_cfg)
3014 return -ENOMEM;
3015
Sathya Perla11ac75e2011-12-13 00:58:50 +00003016 for_all_vfs(adapter, vf_cfg, vf) {
3017 vf_cfg->if_handle = -1;
3018 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003019 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003020 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003021}
3022
/* Provision SR-IOV VFs.
 *
 * Two paths: if VFs are already enabled (previous driver load), reuse
 * them — query their if-ids and MACs instead of creating/assigning new
 * ones. Otherwise create interfaces, assign MACs, and finally enable
 * SR-IOV in PCI. In both cases each VF is granted filter-management
 * privilege if missing, its tx_rate/def_vid are cached, and (new VFs
 * only) QoS is uncapped on BE3 and the VF is enabled in FW.
 * On any error the partially-built state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Reuse existing FW interfaces, or create fresh ones */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Preserve existing MACs, or assign new ones from the seed */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* best-effort: tx_rate is cached only if the query works */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	/* PCI-level SR-IOV enable comes last, after FW-side setup */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3120
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303121/* Converting function_mode bits on BE3 to SH mc_type enums */
3122
3123static u8 be_convert_mc_type(u32 function_mode)
3124{
3125 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3126 return vNIC1;
3127 else if (function_mode & FLEX10_MODE)
3128 return FLEX10;
3129 else if (function_mode & VNIC_MODE)
3130 return vNIC2;
3131 else if (function_mode & UMC_ENABLED)
3132 return UMC;
3133 else
3134 return MC_NONE;
3135}
3136
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	/* VF support is hard-coded only for BE3 when SR-IOV is requested */
	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS only for PFs with the RSS capability and no SR-IOV in use */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;	/* +1 for the default RXQ */

	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3199
Sathya Perla30128032011-11-10 19:17:57 +00003200static void be_setup_init(struct be_adapter *adapter)
3201{
3202 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003203 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003204 adapter->if_handle = -1;
3205 adapter->be3_native = false;
3206 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003207 if (be_physfn(adapter))
3208 adapter->cmd_privileges = MAX_PRIVILEGES;
3209 else
3210 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003211}
3212
/* Populate adapter->res with the per-function resource limits:
 * derived from hard-coded chip limits on BE2/BE3, or queried from FW
 * (GET_FUNC_CONFIG / GET_PROFILE_CONFIG) on newer chips.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only max_vfs is taken from the profile query */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3256
/* Routine to query per function resource limits.
 * Also queries the FW config/active-profile, allocates the pmac_id table
 * (one slot per uc-mac plus one for the primary MAC) and clamps
 * cfg_num_qs to the discovered limits.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* informational only; failure to read the profile is not fatal */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3292
Sathya Perla95046b92013-07-23 15:25:02 +05303293static int be_mac_setup(struct be_adapter *adapter)
3294{
3295 u8 mac[ETH_ALEN];
3296 int status;
3297
3298 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3299 status = be_cmd_get_perm_mac(adapter, mac);
3300 if (status)
3301 return status;
3302
3303 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3304 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3305 } else {
3306 /* Maybe the HW was reset; dev_addr must be re-programmed */
3307 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3308 }
3309
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003310 /* For BE3-R VFs, the PF programs the initial MAC address */
3311 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3312 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3313 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303314 return 0;
3315}
3316
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303317static void be_schedule_worker(struct be_adapter *adapter)
3318{
3319 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3320 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3321}
3322
Sathya Perla77071332013-08-27 16:57:34 +05303323static int be_setup_queues(struct be_adapter *adapter)
3324{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303325 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303326 int status;
3327
3328 status = be_evt_queues_create(adapter);
3329 if (status)
3330 goto err;
3331
3332 status = be_tx_qs_create(adapter);
3333 if (status)
3334 goto err;
3335
3336 status = be_rx_cqs_create(adapter);
3337 if (status)
3338 goto err;
3339
3340 status = be_mcc_queues_create(adapter);
3341 if (status)
3342 goto err;
3343
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303344 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3345 if (status)
3346 goto err;
3347
3348 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3349 if (status)
3350 goto err;
3351
Sathya Perla77071332013-08-27 16:57:34 +05303352 return 0;
3353err:
3354 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3355 return status;
3356}
3357
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303358int be_update_queues(struct be_adapter *adapter)
3359{
3360 struct net_device *netdev = adapter->netdev;
3361 int status;
3362
3363 if (netif_running(netdev))
3364 be_close(netdev);
3365
3366 be_cancel_worker(adapter);
3367
3368 /* If any vectors have been shared with RoCE we cannot re-program
3369 * the MSIx table.
3370 */
3371 if (!adapter->num_msix_roce_vec)
3372 be_msix_disable(adapter);
3373
3374 be_clear_queues(adapter);
3375
3376 if (!msix_enabled(adapter)) {
3377 status = be_msix_enable(adapter);
3378 if (status)
3379 return status;
3380 }
3381
3382 status = be_setup_queues(adapter);
3383 if (status)
3384 return status;
3385
3386 be_schedule_worker(adapter);
3387
3388 if (netif_running(netdev))
3389 status = be_open(netdev);
3390
3391 return status;
3392}
3393
Sathya Perla5fb379e2009-06-18 00:02:59 +00003394static int be_setup(struct be_adapter *adapter)
3395{
Sathya Perla39f1d942012-05-08 19:41:24 +00003396 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303397 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003398 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003399
Sathya Perla30128032011-11-10 19:17:57 +00003400 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003401
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003402 if (!lancer_chip(adapter))
3403 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003404
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003405 status = be_get_config(adapter);
3406 if (status)
3407 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003408
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003409 status = be_msix_enable(adapter);
3410 if (status)
3411 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003412
Sathya Perla77071332013-08-27 16:57:34 +05303413 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3414 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3415 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3416 en_flags |= BE_IF_FLAGS_RSS;
3417 en_flags = en_flags & be_if_cap_flags(adapter);
3418 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3419 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003420 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003421 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003422
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303423 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3424 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303425 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303426 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003427 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003428 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003429
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003430 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003431
Sathya Perla95046b92013-07-23 15:25:02 +05303432 status = be_mac_setup(adapter);
3433 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003434 goto err;
3435
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003436 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003437
Somnath Koture9e2a902013-10-24 14:37:53 +05303438 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3439 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3440 adapter->fw_ver);
3441 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3442 }
3443
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003444 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003445 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003446
3447 be_set_rx_mode(adapter->netdev);
3448
Suresh Reddy76a9e082014-01-15 13:23:40 +05303449 be_cmd_get_acpi_wol_cap(adapter);
3450
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003451 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003452
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003453 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3454 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003455 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003456
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303457 if (sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303458 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003459 be_vf_setup(adapter);
3460 else
3461 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003462 }
3463
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003464 status = be_cmd_get_phy_info(adapter);
3465 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003466 adapter->phy.fc_autoneg = 1;
3467
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303468 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003469 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003470err:
3471 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003472 return status;
3473}
3474
Ivan Vecera66268732011-12-08 01:31:21 +00003475#ifdef CONFIG_NET_POLL_CONTROLLER
3476static void be_netpoll(struct net_device *netdev)
3477{
3478 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003479 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003480 int i;
3481
Sathya Perlae49cc342012-11-27 19:50:02 +00003482 for_all_evt_queues(adapter, eqo, i) {
3483 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3484 napi_schedule(&eqo->napi);
3485 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003486
3487 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003488}
3489#endif
3490
Ajit Khaparde84517482009-09-04 03:12:16 +00003491#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003492static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003493
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003494static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003495 const u8 *p, u32 img_start, int image_size,
3496 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003497{
3498 u32 crc_offset;
3499 u8 flashed_crc[4];
3500 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003501
3502 crc_offset = hdr_size + img_start + image_size - 4;
3503
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003504 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003505
3506 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003507 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003508 if (status) {
3509 dev_err(&adapter->pdev->dev,
3510 "could not get crc from flash, not flashing redboot\n");
3511 return false;
3512 }
3513
3514 /*update redboot only if crc does not match*/
3515 if (!memcmp(flashed_crc, p, 4))
3516 return false;
3517 else
3518 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003519}
3520
Sathya Perla306f1342011-08-02 19:57:45 +00003521static bool phy_flashing_required(struct be_adapter *adapter)
3522{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003523 return (adapter->phy.phy_type == TN_8022 &&
3524 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003525}
3526
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003527static bool is_comp_in_ufi(struct be_adapter *adapter,
3528 struct flash_section_info *fsec, int type)
3529{
3530 int i = 0, img_type = 0;
3531 struct flash_section_info_g2 *fsec_g2 = NULL;
3532
Sathya Perlaca34fe32012-11-06 17:48:56 +00003533 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003534 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3535
3536 for (i = 0; i < MAX_FLASH_COMP; i++) {
3537 if (fsec_g2)
3538 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3539 else
3540 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3541
3542 if (img_type == type)
3543 return true;
3544 }
3545 return false;
3546
3547}
3548
Jingoo Han4188e7d2013-08-05 18:02:02 +09003549static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003550 int header_size,
3551 const struct firmware *fw)
3552{
3553 struct flash_section_info *fsec = NULL;
3554 const u8 *p = fw->data;
3555
3556 p += header_size;
3557 while (p < (fw->data + fw->size)) {
3558 fsec = (struct flash_section_info *)p;
3559 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3560 return fsec;
3561 p += 32;
3562 }
3563 return NULL;
3564}
3565
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003566static int be_flash(struct be_adapter *adapter, const u8 *img,
3567 struct be_dma_mem *flash_cmd, int optype, int img_size)
3568{
3569 u32 total_bytes = 0, flash_op, num_bytes = 0;
3570 int status = 0;
3571 struct be_cmd_write_flashrom *req = flash_cmd->va;
3572
3573 total_bytes = img_size;
3574 while (total_bytes) {
3575 num_bytes = min_t(u32, 32*1024, total_bytes);
3576
3577 total_bytes -= num_bytes;
3578
3579 if (!total_bytes) {
3580 if (optype == OPTYPE_PHY_FW)
3581 flash_op = FLASHROM_OPER_PHY_FLASH;
3582 else
3583 flash_op = FLASHROM_OPER_FLASH;
3584 } else {
3585 if (optype == OPTYPE_PHY_FW)
3586 flash_op = FLASHROM_OPER_PHY_SAVE;
3587 else
3588 flash_op = FLASHROM_OPER_SAVE;
3589 }
3590
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003591 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003592 img += num_bytes;
3593 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3594 flash_op, num_bytes);
3595 if (status) {
3596 if (status == ILLEGAL_IOCTL_REQ &&
3597 optype == OPTYPE_PHY_FW)
3598 break;
3599 dev_err(&adapter->pdev->dev,
3600 "cmd to write to flash rom failed.\n");
3601 return status;
3602 }
3603 }
3604 return 0;
3605}
3606
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003607/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003608static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003609 const struct firmware *fw,
3610 struct be_dma_mem *flash_cmd,
3611 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003612
Ajit Khaparde84517482009-09-04 03:12:16 +00003613{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003614 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003615 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003616 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003617 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003618 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003619 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003620
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003621 struct flash_comp gen3_flash_types[] = {
3622 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3623 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3624 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3625 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3626 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3627 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3628 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3629 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3630 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3631 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3632 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3633 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3634 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3635 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3636 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3637 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3638 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3639 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3640 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3641 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003642 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003643
3644 struct flash_comp gen2_flash_types[] = {
3645 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3646 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3647 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3648 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3649 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3650 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3651 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3652 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3653 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3654 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3655 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3656 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3657 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3658 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3659 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3660 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003661 };
3662
Sathya Perlaca34fe32012-11-06 17:48:56 +00003663 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003664 pflashcomp = gen3_flash_types;
3665 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003666 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003667 } else {
3668 pflashcomp = gen2_flash_types;
3669 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003670 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003671 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003672
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003673 /* Get flash section info*/
3674 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3675 if (!fsec) {
3676 dev_err(&adapter->pdev->dev,
3677 "Invalid Cookie. UFI corrupted ?\n");
3678 return -1;
3679 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003680 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003681 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003682 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003683
3684 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3685 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3686 continue;
3687
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003688 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3689 !phy_flashing_required(adapter))
3690 continue;
3691
3692 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3693 redboot = be_flash_redboot(adapter, fw->data,
3694 pflashcomp[i].offset, pflashcomp[i].size,
3695 filehdr_size + img_hdrs_size);
3696 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003697 continue;
3698 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003699
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003700 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003701 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003702 if (p + pflashcomp[i].size > fw->data + fw->size)
3703 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003704
3705 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3706 pflashcomp[i].size);
3707 if (status) {
3708 dev_err(&adapter->pdev->dev,
3709 "Flashing section type %d failed.\n",
3710 pflashcomp[i].img_type);
3711 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003712 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003713 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003714 return 0;
3715}
3716
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003717static int be_flash_skyhawk(struct be_adapter *adapter,
3718 const struct firmware *fw,
3719 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003720{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003721 int status = 0, i, filehdr_size = 0;
3722 int img_offset, img_size, img_optype, redboot;
3723 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3724 const u8 *p = fw->data;
3725 struct flash_section_info *fsec = NULL;
3726
3727 filehdr_size = sizeof(struct flash_file_hdr_g3);
3728 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3729 if (!fsec) {
3730 dev_err(&adapter->pdev->dev,
3731 "Invalid Cookie. UFI corrupted ?\n");
3732 return -1;
3733 }
3734
3735 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3736 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3737 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3738
3739 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3740 case IMAGE_FIRMWARE_iSCSI:
3741 img_optype = OPTYPE_ISCSI_ACTIVE;
3742 break;
3743 case IMAGE_BOOT_CODE:
3744 img_optype = OPTYPE_REDBOOT;
3745 break;
3746 case IMAGE_OPTION_ROM_ISCSI:
3747 img_optype = OPTYPE_BIOS;
3748 break;
3749 case IMAGE_OPTION_ROM_PXE:
3750 img_optype = OPTYPE_PXE_BIOS;
3751 break;
3752 case IMAGE_OPTION_ROM_FCoE:
3753 img_optype = OPTYPE_FCOE_BIOS;
3754 break;
3755 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3756 img_optype = OPTYPE_ISCSI_BACKUP;
3757 break;
3758 case IMAGE_NCSI:
3759 img_optype = OPTYPE_NCSI_FW;
3760 break;
3761 default:
3762 continue;
3763 }
3764
3765 if (img_optype == OPTYPE_REDBOOT) {
3766 redboot = be_flash_redboot(adapter, fw->data,
3767 img_offset, img_size,
3768 filehdr_size + img_hdrs_size);
3769 if (!redboot)
3770 continue;
3771 }
3772
3773 p = fw->data;
3774 p += filehdr_size + img_offset + img_hdrs_size;
3775 if (p + img_size > fw->data + fw->size)
3776 return -1;
3777
3778 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3779 if (status) {
3780 dev_err(&adapter->pdev->dev,
3781 "Flashing section type %d failed.\n",
3782 fsec->fsec_entry[i].type);
3783 return status;
3784 }
3785 }
3786 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003787}
3788
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003789static int lancer_fw_download(struct be_adapter *adapter,
3790 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003791{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003792#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3793#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3794 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003795 const u8 *data_ptr = NULL;
3796 u8 *dest_image_ptr = NULL;
3797 size_t image_size = 0;
3798 u32 chunk_size = 0;
3799 u32 data_written = 0;
3800 u32 offset = 0;
3801 int status = 0;
3802 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003803 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003804
3805 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3806 dev_err(&adapter->pdev->dev,
3807 "FW Image not properly aligned. "
3808 "Length must be 4 byte aligned.\n");
3809 status = -EINVAL;
3810 goto lancer_fw_exit;
3811 }
3812
3813 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3814 + LANCER_FW_DOWNLOAD_CHUNK;
3815 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003816 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003817 if (!flash_cmd.va) {
3818 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003819 goto lancer_fw_exit;
3820 }
3821
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003822 dest_image_ptr = flash_cmd.va +
3823 sizeof(struct lancer_cmd_req_write_object);
3824 image_size = fw->size;
3825 data_ptr = fw->data;
3826
3827 while (image_size) {
3828 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3829
3830 /* Copy the image chunk content. */
3831 memcpy(dest_image_ptr, data_ptr, chunk_size);
3832
3833 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003834 chunk_size, offset,
3835 LANCER_FW_DOWNLOAD_LOCATION,
3836 &data_written, &change_status,
3837 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003838 if (status)
3839 break;
3840
3841 offset += data_written;
3842 data_ptr += data_written;
3843 image_size -= data_written;
3844 }
3845
3846 if (!status) {
3847 /* Commit the FW written */
3848 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003849 0, offset,
3850 LANCER_FW_DOWNLOAD_LOCATION,
3851 &data_written, &change_status,
3852 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003853 }
3854
3855 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3856 flash_cmd.dma);
3857 if (status) {
3858 dev_err(&adapter->pdev->dev,
3859 "Firmware load error. "
3860 "Status code: 0x%x Additional Status: 0x%x\n",
3861 status, add_status);
3862 goto lancer_fw_exit;
3863 }
3864
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003865 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur4bebb562013-12-05 12:07:55 +05303866 dev_info(&adapter->pdev->dev,
3867 "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00003868 status = lancer_physdev_ctrl(adapter,
3869 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003870 if (status) {
3871 dev_err(&adapter->pdev->dev,
3872 "Adapter busy for FW reset.\n"
3873 "New FW will not be active.\n");
3874 goto lancer_fw_exit;
3875 }
3876 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3877 dev_err(&adapter->pdev->dev,
3878 "System reboot required for new FW"
3879 " to be active\n");
3880 }
3881
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003882 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3883lancer_fw_exit:
3884 return status;
3885}
3886
Sathya Perlaca34fe32012-11-06 17:48:56 +00003887#define UFI_TYPE2 2
3888#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003889#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003890#define UFI_TYPE4 4
3891static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003892 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003893{
3894 if (fhdr == NULL)
3895 goto be_get_ufi_exit;
3896
Sathya Perlaca34fe32012-11-06 17:48:56 +00003897 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3898 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003899 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3900 if (fhdr->asic_type_rev == 0x10)
3901 return UFI_TYPE3R;
3902 else
3903 return UFI_TYPE3;
3904 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003905 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003906
3907be_get_ufi_exit:
3908 dev_err(&adapter->pdev->dev,
3909 "UFI and Interface are not compatible for flashing\n");
3910 return -1;
3911}
3912
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003913static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3914{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003915 struct flash_file_hdr_g3 *fhdr3;
3916 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003917 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003918 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003919 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003920
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003921 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003922 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3923 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003924 if (!flash_cmd.va) {
3925 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003926 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003927 }
3928
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003929 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003930 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003931
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003932 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003933
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003934 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3935 for (i = 0; i < num_imgs; i++) {
3936 img_hdr_ptr = (struct image_hdr *)(fw->data +
3937 (sizeof(struct flash_file_hdr_g3) +
3938 i * sizeof(struct image_hdr)));
3939 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003940 switch (ufi_type) {
3941 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003942 status = be_flash_skyhawk(adapter, fw,
3943 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003944 break;
3945 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003946 status = be_flash_BEx(adapter, fw, &flash_cmd,
3947 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003948 break;
3949 case UFI_TYPE3:
3950 /* Do not flash this ufi on BE3-R cards */
3951 if (adapter->asic_rev < 0x10)
3952 status = be_flash_BEx(adapter, fw,
3953 &flash_cmd,
3954 num_imgs);
3955 else {
3956 status = -1;
3957 dev_err(&adapter->pdev->dev,
3958 "Can't load BE3 UFI on BE3R\n");
3959 }
3960 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003961 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003962 }
3963
Sathya Perlaca34fe32012-11-06 17:48:56 +00003964 if (ufi_type == UFI_TYPE2)
3965 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003966 else if (ufi_type == -1)
3967 status = -1;
3968
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003969 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3970 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003971 if (status) {
3972 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003973 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003974 }
3975
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003976 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003977
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003978be_fw_exit:
3979 return status;
3980}
3981
3982int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3983{
3984 const struct firmware *fw;
3985 int status;
3986
3987 if (!netif_running(adapter->netdev)) {
3988 dev_err(&adapter->pdev->dev,
3989 "Firmware load not allowed (interface is down)\n");
3990 return -1;
3991 }
3992
3993 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3994 if (status)
3995 goto fw_exit;
3996
3997 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3998
3999 if (lancer_chip(adapter))
4000 status = lancer_fw_download(adapter, fw);
4001 else
4002 status = be_fw_download(adapter, fw);
4003
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004004 if (!status)
4005 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4006 adapter->fw_on_flash);
4007
Ajit Khaparde84517482009-09-04 03:12:16 +00004008fw_exit:
4009 release_firmware(fw);
4010 return status;
4011}
4012
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004013static int be_ndo_bridge_setlink(struct net_device *dev,
4014 struct nlmsghdr *nlh)
4015{
4016 struct be_adapter *adapter = netdev_priv(dev);
4017 struct nlattr *attr, *br_spec;
4018 int rem;
4019 int status = 0;
4020 u16 mode = 0;
4021
4022 if (!sriov_enabled(adapter))
4023 return -EOPNOTSUPP;
4024
4025 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4026
4027 nla_for_each_nested(attr, br_spec, rem) {
4028 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4029 continue;
4030
4031 mode = nla_get_u16(attr);
4032 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4033 return -EINVAL;
4034
4035 status = be_cmd_set_hsw_config(adapter, 0, 0,
4036 adapter->if_handle,
4037 mode == BRIDGE_MODE_VEPA ?
4038 PORT_FWD_TYPE_VEPA :
4039 PORT_FWD_TYPE_VEB);
4040 if (status)
4041 goto err;
4042
4043 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4044 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4045
4046 return status;
4047 }
4048err:
4049 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4050 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4051
4052 return status;
4053}
4054
4055static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4056 struct net_device *dev,
4057 u32 filter_mask)
4058{
4059 struct be_adapter *adapter = netdev_priv(dev);
4060 int status = 0;
4061 u8 hsw_mode;
4062
4063 if (!sriov_enabled(adapter))
4064 return 0;
4065
4066 /* BE and Lancer chips support VEB mode only */
4067 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4068 hsw_mode = PORT_FWD_TYPE_VEB;
4069 } else {
4070 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4071 adapter->if_handle, &hsw_mode);
4072 if (status)
4073 return 0;
4074 }
4075
4076 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4077 hsw_mode == PORT_FWD_TYPE_VEPA ?
4078 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4079}
4080
/* netdev callbacks exported to the network stack for be2net devices */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF management hooks (used via "ip link set ... vf N") */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	/* e-switch VEB/VEPA mode control, see be_ndo_bridge_setlink() */
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
4105
4106static void be_netdev_init(struct net_device *netdev)
4107{
4108 struct be_adapter *adapter = netdev_priv(netdev);
4109
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004110 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004111 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004112 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004113 if (be_multi_rxq(adapter))
4114 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004115
4116 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004117 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004118
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004119 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004120 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004121
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004122 netdev->priv_flags |= IFF_UNICAST_FLT;
4123
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004124 netdev->flags |= IFF_MULTICAST;
4125
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004126 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004127
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004128 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004129
4130 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004131}
4132
4133static void be_unmap_pci_bars(struct be_adapter *adapter)
4134{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004135 if (adapter->csr)
4136 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004137 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004138 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004139}
4140
/* BAR number holding the doorbell region: 0 on Lancer chips and on VFs,
 * BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4148
4149static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004150{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004151 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004152 adapter->roce_db.size = 4096;
4153 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4154 db_bar(adapter));
4155 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4156 db_bar(adapter));
4157 }
Parav Pandit045508a2012-03-26 14:27:13 +00004158 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004159}
4160
4161static int be_map_pci_bars(struct be_adapter *adapter)
4162{
4163 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004164
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004165 if (BEx_chip(adapter) && be_physfn(adapter)) {
4166 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4167 if (adapter->csr == NULL)
4168 return -ENOMEM;
4169 }
4170
Sathya Perlace66f782012-11-06 17:48:58 +00004171 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004172 if (addr == NULL)
4173 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004174 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004175
4176 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004177 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004178
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004179pci_map_err:
4180 be_unmap_pci_bars(adapter);
4181 return -ENOMEM;
4182}
4183
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004184static void be_ctrl_cleanup(struct be_adapter *adapter)
4185{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004186 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004187
4188 be_unmap_pci_bars(adapter);
4189
4190 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004191 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4192 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004193
Sathya Perla5b8821b2011-08-02 19:57:44 +00004194 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004195 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004196 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4197 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004198}
4199
/* Initialize the control path: read SLI identity from PCI config space,
 * map BARs, allocate the FW mailbox and rx-filter DMA buffers and set up
 * the locks protecting mailbox/MCC access. On failure, everything
 * acquired so far is released via the goto unwind chain.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify the SLI family / VF-ness from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* The aligned view is what the FW actually uses */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved state is restored on resume / EEH recovery paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4258
4259static void be_stats_cleanup(struct be_adapter *adapter)
4260{
Sathya Perla3abcded2010-10-03 22:12:27 -07004261 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004262
4263 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004264 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4265 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004266}
4267
4268static int be_stats_init(struct be_adapter *adapter)
4269{
Sathya Perla3abcded2010-10-03 22:12:27 -07004270 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004271
Sathya Perlaca34fe32012-11-06 17:48:56 +00004272 if (lancer_chip(adapter))
4273 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4274 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004275 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004276 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004277 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004278 else
4279 /* ALL non-BE ASICs */
4280 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004281
Joe Perchesede23fa2013-08-26 22:45:23 -07004282 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4283 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004284 if (cmd->va == NULL)
4285 return -1;
4286 return 0;
4287}
4288
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The sequence is order-sensitive: RoCE and interrupts go
 * first, the recovery worker must be stopped before the netdev is
 * unregistered, and FW is told we are done before resources are freed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4319
Sathya Perla39f1d942012-05-08 19:41:24 +00004320static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004321{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304322 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004323
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004324 status = be_cmd_get_cntl_attributes(adapter);
4325 if (status)
4326 return status;
4327
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004328 /* Must be a power of 2 or else MODULO will BUG_ON */
4329 adapter->be_get_temp_freq = 64;
4330
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304331 if (BEx_chip(adapter)) {
4332 level = be_cmd_get_fw_log_level(adapter);
4333 adapter->msg_enable =
4334 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4335 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004336
Sathya Perla92bf14a2013-08-27 16:57:32 +05304337 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004338 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004339}
4340
/* Recover a Lancer function after a FW error: wait for the chip to report
 * ready, tear the function down, clear error state and bring it back up.
 * Order matters — be_close() before be_clear(), and error flags must be
 * cleared before be_setup() re-issues FW commands.
 * Returns 0 on success; -EAGAIN means FW resources are still being
 * provisioned and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4377
/* Periodic (1s) worker that watches for HW errors and, on Lancer chips,
 * drives function-level recovery. The netdev is detached under rtnl while
 * recovery runs and re-attached only on success.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4404
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, kicks off stats and die-temperature FW commands,
 * replenishes starved RX queues and updates EQ delay. Always reschedules
 * itself; stopped only via cancel_delayed_work_sync() in shutdown paths.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only fire a new stats cmd when the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature on the PF every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4447
Sathya Perla257a3fe2013-06-14 15:54:51 +05304448/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004449static bool be_reset_required(struct be_adapter *adapter)
4450{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304451 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004452}
4453
Sathya Perlad3791422012-09-28 04:39:44 +00004454static char *mc_name(struct be_adapter *adapter)
4455{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304456 char *str = ""; /* default */
4457
4458 switch (adapter->mc_type) {
4459 case UMC:
4460 str = "UMC";
4461 break;
4462 case FLEX10:
4463 str = "FLEX10";
4464 break;
4465 case vNIC1:
4466 str = "vNIC-1";
4467 break;
4468 case nPAR:
4469 str = "nPAR";
4470 break;
4471 case UFP:
4472 str = "UFP";
4473 break;
4474 case vNIC2:
4475 str = "vNIC-2";
4476 break;
4477 default:
4478 str = "";
4479 }
4480
4481 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004482}
4483
/* "PF" for a physical function, "VF" for a virtual one */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4488
/* PCI probe callback: bring up a be2net device end to end. The sequence
 * is strictly ordered — PCI enable/regions, netdev allocation, DMA mask,
 * control path (be_ctrl_init), FW-ready sync, optional function reset,
 * FW init, stats buffer, adapter setup and finally register_netdev().
 * Any failure unwinds in reverse via the goto chain at the bottom.
 * Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled best-effort and only on the PF */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4610
/* PM suspend callback: arm wake-on-LAN if enabled, quiesce interrupts and
 * the recovery worker, close and clear the adapter, then power the PCI
 * device down. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4635
/* PM resume callback: re-enable the PCI device, wait for FW readiness,
 * re-init FW command path, rebuild the adapter (be_setup), reopen the
 * interface if it was running and restart the recovery worker.
 * Returns 0 on success or a negative errno from an early failure.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() status is not checked here, unlike in
	 * be_probe(); presumably deliberate best-effort — confirm.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4677
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stop the workers, detach the netdev, reset the
 * function so the HW quiesces DMA, and disable the PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4697
/* EEH error_detected callback: on first notification, flag the error,
 * stop the recovery worker and tear down the interface under rtnl.
 * Returns DISCONNECT for permanent failures, otherwise NEED_RESET after
 * giving the FW time to finish any in-progress flash debug dump.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4736
/* EEH slot_reset callback: re-enable the PCI device after a slot reset,
 * restore config space, wait for FW readiness and clear driver error
 * state. Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4763
/* EEH resume callback: after a successful slot reset, re-save PCI state,
 * reset and re-init the function's FW command path, rebuild the adapter,
 * reopen the interface if it was running and restart the recovery worker.
 * Failures are logged; no further unwinding is attempted here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4800
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004801static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004802 .error_detected = be_eeh_err_detected,
4803 .slot_reset = be_eeh_reset,
4804 .resume = be_eeh_resume,
4805};
4806
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004807static struct pci_driver be_driver = {
4808 .name = DRV_NAME,
4809 .id_table = be_dev_ids,
4810 .probe = be_probe,
4811 .remove = be_remove,
4812 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004813 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004814 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004815 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004816};
4817
4818static int __init be_init_module(void)
4819{
Joe Perches8e95a202009-12-03 07:58:21 +00004820 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4821 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004822 printk(KERN_WARNING DRV_NAME
4823 " : Module param rx_frag_size must be 2048/4096/8192."
4824 " Using 2048\n");
4825 rx_frag_size = 2048;
4826 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004827
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004828 return pci_register_driver(&be_driver);
4829}
4830module_init(be_init_module);
4831
/* Module exit point: unregisters the PCI driver, which in turn invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);