blob: 36c80612e21a3ebe6ee52447e9075dc4022f005e [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla11ac75e2011-12-13 00:58:50 +000037static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Sathya Perla6b7c5b92009-03-11 23:32:03 -070041static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable names for the hardware blocks whose
 * bits appear in the low word of the Unrecoverable Error status register.
 * Index into this table == bit position.  Trailing spaces in some entries
 * are deliberate (strings are printed verbatim) — do not normalize them.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: names for the bits in the high word of the
 * Unrecoverable Error status register.  Index == bit position; trailing
 * "Unknown" entries are reserved bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000128 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 mem->va = NULL;
132 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 return 0;
149}
150
Somnath Kotur68c45a22013-03-14 02:42:07 +0000151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168}
169
Somnath Kotur68c45a22013-03-14 02:42:07 +0000170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_RQ_RING_ID_MASK;
190 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000196static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198{
199 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000202
203 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205}
206
Sathya Perla8788fdc2009-07-27 22:52:03 +0000207static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208 bool arm, bool clear_int, u16 num_popped)
209{
210 u32 val = 0;
211 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000212 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
213 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* .ndo_set_mac_address handler: program a new MAC via FW commands and
 * update netdev->dev_addr only after the FW confirms the new address is
 * actually active (a VF's request may be silently ignored or overridden
 * by the PF).  Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC differs from the
	 * currently active MAC.
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC was successfully activated only after
	 * querying the FW for the active MAC.
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0-layout (BE2) FW stats into the driver's chip-agnostic
 * drv_stats structure, converting from LE first.  Per-port counters come
 * from the rxf port slot for this adapter's port; jabber events are
 * tracked per-port in separate rxf fields.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; the
	 * driver exposes their sum.
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1-layout (BE3) FW stats into drv_stats, converting from LE
 * first.  Unlike v0, v1 has dedicated per-port fields for fifo drops,
 * priority pause frames, jabber events and address filtering.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2-layout FW stats into drv_stats, converting from LE first.
 * The v2 layout is a superset of v1; it additionally carries RoCE
 * counters, copied only when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy Lancer per-physical-port FW stats into drv_stats, converting
 * from LE first.  Lancer exposes 64-bit counters; only the low 32 bits
 * (the *_lo fields) are consumed here.  Note rx_fifo_overflow feeds both
 * the input-fifo and rxpp-fifo driver counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filtered drops are reported separately and
	 * summed for the driver counter, as in the v0 layout.
	 */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000527
/* Fold a free-running 16-bit HW counter value into a 32-bit software
 * accumulator: the low 16 bits of *acc mirror the last HW reading and
 * the high bits count how many times the HW counter has wrapped.
 * NOTE(review): the lo()/hi() macros are not #undef'd and remain
 * defined for the rest of the file.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	/* HW value smaller than the last snapshot => the counter wrapped */
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	/* single store so concurrent readers never see a torn value */
	ACCESS_ONCE(*acc) = newacc;
}
539
Jingoo Han4188e7d2013-08-05 18:02:02 +0900540static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000554void be_parse_stats(struct be_adapter *adapter)
555{
Ajit Khaparde61000862013-10-03 16:16:33 -0500556 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000557 struct be_rx_obj *rxo;
558 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000560
Sathya Perlaca34fe32012-11-06 17:48:56 +0000561 if (lancer_chip(adapter)) {
562 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000563 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000564 if (BE2_chip(adapter))
565 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500566 else if (BE3_chip(adapter))
567 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000568 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500569 else
570 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000571
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000573 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000574 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000576 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000577 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000578}
579
/* .ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via the u64_stats seqcount retry loop) and derive
 * the rtnl error counters from the FW-populated drv_stats.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent
		 * writer update (u64 stats are not atomic on 32-bit)
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per-interface; it is per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648 struct net_device *netdev = adapter->netdev;
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000651 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659}
660
Sathya Perla3c8def92011-06-12 20:01:58 +0000661static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663{
Sathya Perla3c8def92011-06-12 20:01:58 +0000664 struct be_tx_stats *stats = tx_stats(txo);
665
Sathya Perlaab1594e2011-07-25 19:10:15 +0000666 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 /* to account for hdr wrb */
685 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000702 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703}
704
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
/* Program the Tx header WRB: offload flags (LSO/csum), vlan tag (if the
 * stack requested HW tagging), and the wrb count and total byte length
 * of the request. The caller converts the hdr to LE after this.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not set for Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* csum offload: exactly one of tcpcs/udpcs is set */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
755
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000764 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000765 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000770 }
771}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772
/* DMA-map the skb and populate the Tx queue with one WRB per fragment,
 * preceded by a header WRB (and followed by a dummy WRB if requested).
 * Returns the number of data bytes queued, or 0 on a DMA mapping
 * failure, in which case everything mapped so far is unmapped and the
 * queue head is rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the hdr wrb first; it is filled in last, once the total
	 * copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length wrb to make the wrb count even */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: unmap every wrb filled so far (only the first one may
	 * have been a single mapping) and restore the queue head.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
839
/* Insert the vlan tag (and the qnq outer vlan, if configured) into the
 * packet payload in software instead of relying on HW tagging.
 * May return a different skb (or NULL on allocation failure, in which
 * case the original skb has already been freed by __vlan_put_tag).
 * *skip_hw_vlan is set when the f/w must be told to skip HW insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* Get a private copy if the skb is shared before modifying it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkt on a qnq port: tag it with the pvid */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the payload; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
882
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
Sathya Perlaee9c7992013-05-22 23:04:55 +0000910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000914}
915
/* Apply BEx/Lancer-specific Tx errata workarounds to the skb:
 * trim bogus padding on short IPv4 pkts, switch to SW vlan insertion
 * where HW tagging misbehaves, and drop pkts that would lock up the HW.
 * Returns the (possibly re-allocated) skb, or NULL if it was dropped
 * or an allocation failed (the skb is consumed in both cases).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the skb down to hdrs + the IP-declared payload len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
983
Vasundhara Volamec495fa2014-03-03 14:25:38 +0530984static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
985 struct sk_buff *skb,
986 bool *skip_hw_vlan)
987{
988 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
989 * less may cause a transmit stall on that port. So the work-around is
990 * to pad short packets (<= 32 bytes) to a 36-byte length.
991 */
992 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
993 if (skb_padto(skb, 36))
994 return NULL;
995 skb->len = 36;
996 }
997
998 if (BEx_chip(adapter) || lancer_chip(adapter)) {
999 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1000 if (!skb)
1001 return NULL;
1002 }
1003
1004 return skb;
1005}
1006
/* ndo_start_xmit handler: apply chip errata workarounds, build the
 * WRBs for the skb, and ring the Tx doorbell. Always returns
 * NETDEV_TX_OK; on any failure the skb is dropped and counted in
 * tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rollback point if wrb setup fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround consumed/dropped the pkt */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		/* gso_segs is read before the doorbell is rung, while the
		 * driver still owns the skb
		 */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: restore the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1055
1056static int be_change_mtu(struct net_device *netdev, int new_mtu)
1057{
1058 struct be_adapter *adapter = netdev_priv(netdev);
1059 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001060 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1061 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062 dev_info(&adapter->pdev->dev,
1063 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001064 BE_MIN_MTU,
1065 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001066 return -EINVAL;
1067 }
1068 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1069 netdev->mtu, new_mtu);
1070 netdev->mtu = new_mtu;
1071 return 0;
1072}
1073
1074/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001075 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1076 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 */
/* Program the HW vlan filter table from adapter->vlan_tag[].
 * Falls back to vlan-promiscuous mode when more vids are configured
 * than the adapter supports, or when the f/w reports insufficient
 * resources; re-disables vlan promisc once filtering succeeds again.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in vlan promisc mode: nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1133
Patrick McHardy80d5c362013-04-19 02:04:28 +00001134static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001135{
1136 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001137 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001139 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0)
1141 goto ret;
1142
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001143 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301144 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001145
Somnath Kotura6b74e02014-01-21 15:50:55 +05301146 status = be_vid_config(adapter);
1147 if (status) {
1148 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001149 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301150 }
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001151ret:
1152 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153}
1154
Patrick McHardy80d5c362013-04-19 02:04:28 +00001155static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001156{
1157 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001158 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001160 /* Packets with VID 0 are always received by Lancer by default */
1161 if (lancer_chip(adapter) && vid == 0)
1162 goto ret;
1163
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301165 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001166 if (!status)
1167 adapter->vlans_added--;
1168 else
1169 adapter->vlan_tag[vid] = 1;
1170ret:
1171 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172}
1173
Somnath kotur7ad09452014-03-03 14:24:43 +05301174static void be_clear_promisc(struct be_adapter *adapter)
1175{
1176 adapter->promiscuous = false;
1177 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1178
1179 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1180}
1181
/* ndo_set_rx_mode handler: sync the HW rx filters (promisc, mcast,
 * uc MAC list) with the netdev's current flags and address lists.
 * Falls back to promisc/allmulti when the HW filter capacity is
 * exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* reprogram the vlan filter table that promisc bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the uc MAC list only when its length changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously programmed secondary uc MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many uc MACs for the HW: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1241
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001242static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1243{
1244 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001245 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001246 int status;
1247
Sathya Perla11ac75e2011-12-13 00:58:50 +00001248 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001249 return -EPERM;
1250
Sathya Perla11ac75e2011-12-13 00:58:50 +00001251 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001252 return -EINVAL;
1253
Sathya Perla3175d8c2013-07-23 15:25:03 +05301254 if (BEx_chip(adapter)) {
1255 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1256 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001257
Sathya Perla11ac75e2011-12-13 00:58:50 +00001258 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1259 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301260 } else {
1261 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1262 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001263 }
1264
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001265 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001266 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1267 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001268 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001269 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001270
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001271 return status;
1272}
1273
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001274static int be_get_vf_config(struct net_device *netdev, int vf,
1275 struct ifla_vf_info *vi)
1276{
1277 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001278 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001279
Sathya Perla11ac75e2011-12-13 00:58:50 +00001280 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001281 return -EPERM;
1282
Sathya Perla11ac75e2011-12-13 00:58:50 +00001283 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001284 return -EINVAL;
1285
1286 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001287 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001288 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1289 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001290 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001291
1292 return 0;
1293}
1294
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001295static int be_set_vf_vlan(struct net_device *netdev,
1296 int vf, u16 vlan, u8 qos)
1297{
1298 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001299 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001300 int status = 0;
1301
Sathya Perla11ac75e2011-12-13 00:58:50 +00001302 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001303 return -EPERM;
1304
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001305 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001306 return -EINVAL;
1307
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001308 if (vlan || qos) {
1309 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301310 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001311 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1312 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001313 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001314 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301315 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1316 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001317 }
1318
Somnath Koturc5022242014-03-03 14:24:20 +05301319 if (!status)
1320 vf_cfg->vlan_tag = vlan;
1321 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001322 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301323 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001324 return status;
1325}
1326
Ajit Khapardee1d18732010-07-23 01:52:13 +00001327static int be_set_vf_tx_rate(struct net_device *netdev,
1328 int vf, int rate)
1329{
1330 struct be_adapter *adapter = netdev_priv(netdev);
1331 int status = 0;
1332
Sathya Perla11ac75e2011-12-13 00:58:50 +00001333 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001334 return -EPERM;
1335
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001336 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001337 return -EINVAL;
1338
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001339 if (rate < 100 || rate > 10000) {
1340 dev_err(&adapter->pdev->dev,
1341 "tx rate must be between 100 and 10000 Mbps\n");
1342 return -EINVAL;
1343 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001344
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001345 if (lancer_chip(adapter))
1346 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1347 else
1348 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001349
1350 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001351 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001352 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001353 else
1354 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001355 return status;
1356}
1357
Sathya Perla2632baf2013-10-01 16:00:00 +05301358static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1359 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360{
Sathya Perla2632baf2013-10-01 16:00:00 +05301361 aic->rx_pkts_prev = rx_pkts;
1362 aic->tx_reqs_prev = tx_pkts;
1363 aic->jiffies = now;
1364}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001365
/* Adaptive interrupt coalescing: for every event queue, derive a new EQ
 * delay (eqd) from the rx+tx packet rate observed since the last pass,
 * then push all changed delays to the FW in a single modify-eqd command.
 * Called periodically (from the adapter worker).
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled for this EQ: fall back to the
			 * user-configured static delay (et_eqd) and clear the
			 * snapshot so a re-enable starts fresh.
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the 64-bit rx/tx counters under their seqcount so we
		 * get a consistent snapshot on 32-bit hosts.
		 */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* packets/sec over the interval, then map to an eqd value;
		 * small eqd values are forced to 0 (no coalescing) and the
		 * result is clamped to the [min_eqd, max_eqd] tunables.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* One FW command covers all modified EQs */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1432
Sathya Perla3abcded2010-10-03 22:12:27 -07001433static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001434 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001435{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001436 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001437
Sathya Perlaab1594e2011-07-25 19:10:15 +00001438 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001439 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001440 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001441 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001442 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001443 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001444 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001445 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001446 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447}
1448
Sathya Perla2e588f82011-03-11 02:49:26 +00001449static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001450{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001451 /* L4 checksum is not reliable for non TCP/UDP packets.
1452 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001453 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1454 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001455}
1456
/* Pop the rx buffer at the queue tail: returns its page_info and
 * consumes one rxq entry (tail advanced, 'used' decremented). The frag
 * flagged last_page_user owns the DMA mapping of the shared compound
 * page, so the page is unmapped when that frag is consumed.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted buffer must exist at the tail */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	/* Advance tail only after the slot has been read above */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1478
1479/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001480static void be_rx_compl_discard(struct be_rx_obj *rxo,
1481 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001484 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001486 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301487 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001488 put_page(page_info->page);
1489 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490 }
1491}
1492
1493/*
1494 * skb_fill_rx_data forms a complete skb for an ether frame
1495 * indicated by rxcp.
1496 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001497static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1498 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001499{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001501 u16 i, j;
1502 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001503 u8 *start;
1504
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301505 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506 start = page_address(page_info->page) + page_info->page_offset;
1507 prefetch(start);
1508
1509 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001510 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512 skb->len = curr_frag_len;
1513 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001514 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515 /* Complete packet has now been moved to data */
1516 put_page(page_info->page);
1517 skb->data_len = 0;
1518 skb->tail += curr_frag_len;
1519 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001520 hdr_len = ETH_HLEN;
1521 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001522 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001523 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 skb_shinfo(skb)->frags[0].page_offset =
1525 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001526 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001528 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529 skb->tail += hdr_len;
1530 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001531 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532
Sathya Perla2e588f82011-03-11 02:49:26 +00001533 if (rxcp->pkt_size <= rx_frag_size) {
1534 BUG_ON(rxcp->num_rcvd != 1);
1535 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536 }
1537
1538 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001539 remaining = rxcp->pkt_size - curr_frag_len;
1540 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301541 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001542 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001544 /* Coalesce all frags from the same physical page in one slot */
1545 if (page_info->page_offset == 0) {
1546 /* Fresh page */
1547 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001548 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001549 skb_shinfo(skb)->frags[j].page_offset =
1550 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001551 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001552 skb_shinfo(skb)->nr_frags++;
1553 } else {
1554 put_page(page_info->page);
1555 }
1556
Eric Dumazet9e903e02011-10-18 21:00:24 +00001557 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558 skb->len += curr_frag_len;
1559 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001560 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001561 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001562 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001564 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565}
1566
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001567/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301568static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001569 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001571 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001572 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001574
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001575 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001576 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001577 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001578 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579 return;
1580 }
1581
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001582 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001584 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001585 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001586 else
1587 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001588
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001589 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001590 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001591 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001592 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301593 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001594
Jiri Pirko343e43c2011-08-25 02:50:51 +00001595 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001596 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001597
1598 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001599}
1600
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	/* Use the per-napi frags skb; on failure drop the completion */
	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16; starting at -1 (wraps) makes the first iteration open
	 * frag slot 0 via the j++ below.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as slot j: drop the duplicate reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for packets with a verified hw checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1656
/* Decode a v1-format (BE3 native mode) rx completion entry into the
 * driver's chip-independent rxcp representation.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vtm/vlan_tag fields are only meaningful when vtp is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* NOTE(review): be_parse_rx_compl_v0 also extracts ip_frag, and
	 * be_rx_compl_get consults rxcp->ip_frag after either parser; here
	 * rxcp->ip_frag is never written. Confirm the v1 compl layout has
	 * no ip_frag bit, or that the field cannot hold a stale value via
	 * the shared rxo->rxcp buffer.
	 */
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686
/* Decode a v0-format (legacy) rx completion entry into the driver's
 * chip-independent rxcp representation.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vtm/vlan_tag fields are only meaningful when vtp is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	/* ip_frag drives the "no reliable L4 csum for IP fragments" fixup
	 * in be_rx_compl_get()
	 */
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1718
/* Fetch, parse and consume the next valid rx completion from the rx CQ.
 * Returns a pointer to the (per-rxo, reused) rxcp on success or NULL if
 * no completion is pending. Also applies sw fixups: zero l4_csum for IP
 * fragments and clear bogus vlanf indications.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the compl body before the valid-bit check above */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The hw L4 checksum is not meaningful for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag unless the vlan is configured on the if */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1761
Eric Dumazet1829b082011-03-01 05:48:12 +00001762static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001765
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001766 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001767 gfp |= __GFP_COMP;
1768 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769}
1770
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Up to MAX_RX_POST descriptors are posted, stopping early if the ring
 * is full (an occupied page_info slot) or allocation/DMA mapping fails.
 * All frags carved from one big page share its single DMA mapping; the
 * last frag of the page is marked last_page_user so get_rx_page_info()
 * knows when to unmap.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_info->page_offset = 0;
		} else {
			/* Carve the next frag out of the current page; each
			 * frag holds its own page reference.
			 */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the frag's bus address into the rx descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop ended with the current page partially used: its most recent
	 * frag becomes the mapping owner.
	 */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell the hw about the newly posted buffers */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1841
/* Fetch and consume the next valid tx completion from @tx_cq, or return
 * NULL if none is pending. The valid dword is cleared and the tail
 * advanced so the entry is seen only once per CQ wrap.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't read the compl body before the valid-bit check above */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1857
/* Reclaim one completed tx request: unmap its frag wrbs up to and
 * including @last_index, free the skb, and return the number of wrbs
 * consumed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is recorded at the position of its header wrb (txq tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb also covers the linear skb header, so
		 * request header unmapping only on the first iteration.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1889
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001890/* Return the number of events in the event queue */
1891static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001892{
1893 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001894 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001895
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001896 do {
1897 eqe = queue_tail_node(&eqo->q);
1898 if (eqe->evt == 0)
1899 break;
1900
1901 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001902 eqe->evt = 0;
1903 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001904 queue_tail_inc(&eqo->q);
1905 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001906
1907 return num;
1908}
1909
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001910/* Leaves the EQ is disarmed state */
1911static void be_eq_clean(struct be_eq_obj *eqo)
1912{
1913 int num = events_get(eqo);
1914
1915 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1916}
1917
1918static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919{
1920 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001921 struct be_queue_info *rxq = &rxo->q;
1922 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001923 struct be_rx_compl_info *rxcp;
Sathya Perlad23e9462012-12-17 19:38:51 +00001924 struct be_adapter *adapter = rxo->adapter;
1925 int flush_wait = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001926
Sathya Perlad23e9462012-12-17 19:38:51 +00001927 /* Consume pending rx completions.
1928 * Wait for the flush completion (identified by zero num_rcvd)
1929 * to arrive. Notify CQ even when there are no more CQ entries
1930 * for HW to flush partially coalesced CQ entries.
1931 * In Lancer, there is no need to wait for flush compl.
1932 */
1933 for (;;) {
1934 rxcp = be_rx_compl_get(rxo);
1935 if (rxcp == NULL) {
1936 if (lancer_chip(adapter))
1937 break;
1938
1939 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1940 dev_warn(&adapter->pdev->dev,
1941 "did not receive flush compl\n");
1942 break;
1943 }
1944 be_cq_notify(adapter, rx_cq->id, true, 0);
1945 mdelay(1);
1946 } else {
1947 be_rx_compl_discard(rxo, rxcp);
Sathya Perla3f5dffe2013-05-08 02:05:49 +00001948 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perlad23e9462012-12-17 19:38:51 +00001949 if (rxcp->num_rcvd == 0)
1950 break;
1951 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952 }
1953
Sathya Perlad23e9462012-12-17 19:38:51 +00001954 /* After cleanup, leave the CQ in unarmed state */
1955 be_cq_notify(adapter, rx_cq->id, false, 0);
1956
1957 /* Then free posted rx buffers that were not used */
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301958 while (atomic_read(&rxq->used) > 0) {
1959 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960 put_page(page_info->page);
1961 memset(page_info, 0, sizeof(*page_info));
1962 }
1963 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001964 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001965}
1966
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001967static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001968{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001969 struct be_tx_obj *txo;
1970 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001971 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001972 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001973 struct sk_buff *sent_skb;
1974 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001975 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001976
Sathya Perlaa8e91792009-08-10 03:42:43 +00001977 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1978 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001979 pending_txqs = adapter->num_tx_qs;
1980
1981 for_all_tx_queues(adapter, txo, i) {
1982 txq = &txo->q;
1983 while ((txcp = be_tx_compl_get(&txo->cq))) {
1984 end_idx =
1985 AMAP_GET_BITS(struct amap_eth_tx_compl,
1986 wrb_index, txcp);
1987 num_wrbs += be_tx_compl_process(adapter, txo,
1988 end_idx);
1989 cmpl++;
1990 }
1991 if (cmpl) {
1992 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1993 atomic_sub(num_wrbs, &txq->used);
1994 cmpl = 0;
1995 num_wrbs = 0;
1996 }
1997 if (atomic_read(&txq->used) == 0)
1998 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001999 }
2000
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002001 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00002002 break;
2003
2004 mdelay(1);
2005 } while (true);
2006
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002007 for_all_tx_queues(adapter, txo, i) {
2008 txq = &txo->q;
2009 if (atomic_read(&txq->used))
2010 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2011 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00002012
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002013 /* free posted tx for which compls will never arrive */
2014 while (atomic_read(&txq->used)) {
2015 sent_skb = txo->sent_skb_list[txq->tail];
2016 end_idx = txq->tail;
2017 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2018 &dummy_wrb);
2019 index_adv(&end_idx, num_wrbs - 1, txq->len);
2020 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2021 atomic_sub(num_wrbs, &txq->used);
2022 }
Sathya Perlab03388d2010-02-18 00:37:17 +00002023 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002024}
2025
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002026static void be_evt_queues_destroy(struct be_adapter *adapter)
2027{
2028 struct be_eq_obj *eqo;
2029 int i;
2030
2031 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002032 if (eqo->q.created) {
2033 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002034 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302035 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302036 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002037 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002038 be_queue_free(adapter, &eqo->q);
2039 }
2040}
2041
2042static int be_evt_queues_create(struct be_adapter *adapter)
2043{
2044 struct be_queue_info *eq;
2045 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302046 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002047 int i, rc;
2048
Sathya Perla92bf14a2013-08-27 16:57:32 +05302049 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2050 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002051
2052 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302053 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2054 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302055 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302056 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002057 eqo->adapter = adapter;
2058 eqo->tx_budget = BE_TX_BUDGET;
2059 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302060 aic->max_eqd = BE_MAX_EQD;
2061 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002062
2063 eq = &eqo->q;
2064 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2065 sizeof(struct be_eq_entry));
2066 if (rc)
2067 return rc;
2068
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302069 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002070 if (rc)
2071 return rc;
2072 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002073 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002074}
2075
Sathya Perla5fb379e2009-06-18 00:02:59 +00002076static void be_mcc_queues_destroy(struct be_adapter *adapter)
2077{
2078 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002079
Sathya Perla8788fdc2009-07-27 22:52:03 +00002080 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002081 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002082 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002083 be_queue_free(adapter, q);
2084
Sathya Perla8788fdc2009-07-27 22:52:03 +00002085 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002086 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002087 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002088 be_queue_free(adapter, q);
2089}
2090
2091/* Must be called only after TX qs are created as MCC shares TX EQ */
2092static int be_mcc_queues_create(struct be_adapter *adapter)
2093{
2094 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002095
Sathya Perla8788fdc2009-07-27 22:52:03 +00002096 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002097 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00002098 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002099 goto err;
2100
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002101 /* Use the default EQ for MCC completions */
2102 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002103 goto mcc_cq_free;
2104
Sathya Perla8788fdc2009-07-27 22:52:03 +00002105 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002106 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2107 goto mcc_cq_destroy;
2108
Sathya Perla8788fdc2009-07-27 22:52:03 +00002109 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002110 goto mcc_q_free;
2111
2112 return 0;
2113
2114mcc_q_free:
2115 be_queue_free(adapter, q);
2116mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002117 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002118mcc_cq_free:
2119 be_queue_free(adapter, cq);
2120err:
2121 return -1;
2122}
2123
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002124static void be_tx_queues_destroy(struct be_adapter *adapter)
2125{
2126 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002127 struct be_tx_obj *txo;
2128 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002129
Sathya Perla3c8def92011-06-12 20:01:58 +00002130 for_all_tx_queues(adapter, txo, i) {
2131 q = &txo->q;
2132 if (q->created)
2133 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2134 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002135
Sathya Perla3c8def92011-06-12 20:01:58 +00002136 q = &txo->cq;
2137 if (q->created)
2138 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2139 be_queue_free(adapter, q);
2140 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002141}
2142
Sathya Perla77071332013-08-27 16:57:34 +05302143static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002144{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002145 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002146 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302147 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002148
Sathya Perla92bf14a2013-08-27 16:57:32 +05302149 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002150
Sathya Perla3c8def92011-06-12 20:01:58 +00002151 for_all_tx_queues(adapter, txo, i) {
2152 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002153 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2154 sizeof(struct be_eth_tx_compl));
2155 if (status)
2156 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002157
John Stultz827da442013-10-07 15:51:58 -07002158 u64_stats_init(&txo->stats.sync);
2159 u64_stats_init(&txo->stats.sync_compl);
2160
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002161 /* If num_evt_qs is less than num_tx_qs, then more than
2162 * one txq share an eq
2163 */
2164 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2165 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2166 if (status)
2167 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002168
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002169 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2170 sizeof(struct be_eth_wrb));
2171 if (status)
2172 return status;
2173
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002174 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002175 if (status)
2176 return status;
2177 }
2178
Sathya Perlad3791422012-09-28 04:39:44 +00002179 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2180 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002181 return 0;
2182}
2183
2184static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002185{
2186 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002187 struct be_rx_obj *rxo;
2188 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189
Sathya Perla3abcded2010-10-03 22:12:27 -07002190 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002191 q = &rxo->cq;
2192 if (q->created)
2193 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2194 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002195 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002196}
2197
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002198static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002199{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002200 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002201 struct be_rx_obj *rxo;
2202 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002203
Sathya Perla92bf14a2013-08-27 16:57:32 +05302204 /* We can create as many RSS rings as there are EQs. */
2205 adapter->num_rx_qs = adapter->num_evt_qs;
2206
2207 /* We'll use RSS only if atleast 2 RSS rings are supported.
2208 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002209 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302210 if (adapter->num_rx_qs > 1)
2211 adapter->num_rx_qs++;
2212
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002213 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002214 for_all_rx_queues(adapter, rxo, i) {
2215 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002216 cq = &rxo->cq;
2217 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2218 sizeof(struct be_eth_rx_compl));
2219 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002220 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221
John Stultz827da442013-10-07 15:51:58 -07002222 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2224 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002225 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002226 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002227 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228
Sathya Perlad3791422012-09-28 04:39:44 +00002229 dev_info(&adapter->pdev->dev,
2230 "created %d RSS queue(s) and 1 default RX queue\n",
2231 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002233}
2234
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002235static irqreturn_t be_intx(int irq, void *dev)
2236{
Sathya Perlae49cc342012-11-27 19:50:02 +00002237 struct be_eq_obj *eqo = dev;
2238 struct be_adapter *adapter = eqo->adapter;
2239 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002241 /* IRQ is not expected when NAPI is scheduled as the EQ
2242 * will not be armed.
2243 * But, this can happen on Lancer INTx where it takes
2244 * a while to de-assert INTx or in BE2 where occasionaly
2245 * an interrupt may be raised even when EQ is unarmed.
2246 * If NAPI is already scheduled, then counting & notifying
2247 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002248 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002249 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002250 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002251 __napi_schedule(&eqo->napi);
2252 if (num_evts)
2253 eqo->spurious_intr = 0;
2254 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002255 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002256
2257 /* Return IRQ_HANDLED only for the the first spurious intr
2258 * after a valid intr to stop the kernel from branding
2259 * this irq as a bad one!
2260 */
2261 if (num_evts || eqo->spurious_intr++ == 0)
2262 return IRQ_HANDLED;
2263 else
2264 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265}
2266
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002267static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002268{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002269 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002270
Sathya Perla0b545a62012-11-23 00:27:18 +00002271 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2272 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273 return IRQ_HANDLED;
2274}
2275
Sathya Perla2e588f82011-03-11 02:49:26 +00002276static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277{
Somnath Koture38b1702013-05-29 22:55:56 +00002278 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279}
2280
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002281static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla6384a4d2013-10-25 10:40:16 +05302282 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002283{
Sathya Perla3abcded2010-10-03 22:12:27 -07002284 struct be_adapter *adapter = rxo->adapter;
2285 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002286 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287 u32 work_done;
2288
2289 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002290 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002291 if (!rxcp)
2292 break;
2293
Sathya Perla12004ae2011-08-02 19:57:46 +00002294 /* Is it a flush compl that has no data */
2295 if (unlikely(rxcp->num_rcvd == 0))
2296 goto loop_continue;
2297
2298 /* Discard compl with partial DMA Lancer B0 */
2299 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002300 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002301 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002302 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002303
Sathya Perla12004ae2011-08-02 19:57:46 +00002304 /* On BE drop pkts that arrive due to imperfect filtering in
2305 * promiscuous mode on some skews
2306 */
2307 if (unlikely(rxcp->port != adapter->port_num &&
2308 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002309 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002310 goto loop_continue;
2311 }
2312
Sathya Perla6384a4d2013-10-25 10:40:16 +05302313 /* Don't do gro when we're busy_polling */
2314 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002315 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002316 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05302317 be_rx_compl_process(rxo, napi, rxcp);
2318
Sathya Perla12004ae2011-08-02 19:57:46 +00002319loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002320 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002321 }
2322
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002323 if (work_done) {
2324 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002325
Sathya Perla6384a4d2013-10-25 10:40:16 +05302326 /* When an rx-obj gets into post_starved state, just
2327 * let be_worker do the posting.
2328 */
2329 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2330 !rxo->rx_post_starved)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002331 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002332 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002333
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002334 return work_done;
2335}
2336
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002337static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2338 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002339{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002340 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002341 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002342
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002343 for (work_done = 0; work_done < budget; work_done++) {
2344 txcp = be_tx_compl_get(&txo->cq);
2345 if (!txcp)
2346 break;
2347 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002348 AMAP_GET_BITS(struct amap_eth_tx_compl,
2349 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002350 }
2351
2352 if (work_done) {
2353 be_cq_notify(adapter, txo->cq.id, true, work_done);
2354 atomic_sub(num_wrbs, &txo->q.used);
2355
2356 /* As Tx wrbs have been freed up, wake up netdev queue
2357 * if it was stopped due to lack of tx wrbs. */
2358 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2359 atomic_read(&txo->q.used) < txo->q.len / 2) {
2360 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002361 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002362
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002363 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2364 tx_stats(txo)->tx_compl += work_done;
2365 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2366 }
2367 return (work_done < budget); /* Done */
2368}
Sathya Perla3c8def92011-06-12 20:01:58 +00002369
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302370int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002371{
2372 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2373 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002374 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302375 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002376 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002377
Sathya Perla0b545a62012-11-23 00:27:18 +00002378 num_evts = events_get(eqo);
2379
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002380 /* Process all TXQs serviced by this EQ */
2381 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2382 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2383 eqo->tx_budget, i);
2384 if (!tx_done)
2385 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002386 }
2387
Sathya Perla6384a4d2013-10-25 10:40:16 +05302388 if (be_lock_napi(eqo)) {
2389 /* This loop will iterate twice for EQ0 in which
2390 * completions of the last RXQ (default one) are also processed
2391 * For other EQs the loop iterates only once
2392 */
2393 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2394 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2395 max_work = max(work, max_work);
2396 }
2397 be_unlock_napi(eqo);
2398 } else {
2399 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002400 }
2401
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002402 if (is_mcc_eqo(eqo))
2403 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002404
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002405 if (max_work < budget) {
2406 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002407 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002408 } else {
2409 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002410 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002411 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002412 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002413}
2414
#ifdef CONFIG_NET_RX_BUSY_POLL
/* busy-poll entry point: service up to 4 rx compls per ring on this EQ.
 * Returns LL_FLUSH_BUSY when napi currently owns the rings.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2436
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002437void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002438{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002439 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2440 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002441 u32 i;
2442
Sathya Perlad23e9462012-12-17 19:38:51 +00002443 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002444 return;
2445
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002446 if (lancer_chip(adapter)) {
2447 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2448 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2449 sliport_err1 = ioread32(adapter->db +
2450 SLIPORT_ERROR1_OFFSET);
2451 sliport_err2 = ioread32(adapter->db +
2452 SLIPORT_ERROR2_OFFSET);
2453 }
2454 } else {
2455 pci_read_config_dword(adapter->pdev,
2456 PCICFG_UE_STATUS_LOW, &ue_lo);
2457 pci_read_config_dword(adapter->pdev,
2458 PCICFG_UE_STATUS_HIGH, &ue_hi);
2459 pci_read_config_dword(adapter->pdev,
2460 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2461 pci_read_config_dword(adapter->pdev,
2462 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002463
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002464 ue_lo = (ue_lo & ~ue_lo_mask);
2465 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002466 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002467
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002468 /* On certain platforms BE hardware can indicate spurious UEs.
2469 * Allow the h/w to stop working completely in case of a real UE.
2470 * Hence not setting the hw_error for UE detection.
2471 */
2472 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002473 adapter->hw_error = true;
Somnath Kotur4bebb562013-12-05 12:07:55 +05302474 /* Do not log error messages if its a FW reset */
2475 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2476 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2477 dev_info(&adapter->pdev->dev,
2478 "Firmware update in progress\n");
2479 return;
2480 } else {
2481 dev_err(&adapter->pdev->dev,
2482 "Error detected in the card\n");
2483 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002484 }
2485
2486 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2487 dev_err(&adapter->pdev->dev,
2488 "ERR: sliport status 0x%x\n", sliport_status);
2489 dev_err(&adapter->pdev->dev,
2490 "ERR: sliport error1 0x%x\n", sliport_err1);
2491 dev_err(&adapter->pdev->dev,
2492 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002493 }
2494
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002495 if (ue_lo) {
2496 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2497 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002498 dev_err(&adapter->pdev->dev,
2499 "UE: %s bit set\n", ue_status_low_desc[i]);
2500 }
2501 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002502
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002503 if (ue_hi) {
2504 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2505 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002506 dev_err(&adapter->pdev->dev,
2507 "UE: %s bit set\n", ue_status_hi_desc[i]);
2508 }
2509 }
2510
2511}
2512
Sathya Perla8d56ff12009-11-22 22:02:26 +00002513static void be_msix_disable(struct be_adapter *adapter)
2514{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002515 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002516 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002517 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302518 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002519 }
2520}
2521
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002522static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002523{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302524 int i, status, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002525 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002526
Sathya Perla92bf14a2013-08-27 16:57:32 +05302527 /* If RoCE is supported, program the max number of NIC vectors that
2528 * may be configured via set-channels, along with vectors needed for
2529 * RoCe. Else, just program the number we'll use initially.
2530 */
2531 if (be_roce_supported(adapter))
2532 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2533 2 * num_online_cpus());
2534 else
2535 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002536
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002537 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002538 adapter->msix_entries[i].entry = i;
2539
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002540 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002541 if (status == 0) {
2542 goto done;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302543 } else if (status >= MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002544 num_vec = status;
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002545 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2546 num_vec);
2547 if (!status)
Sathya Perla3abcded2010-10-03 22:12:27 -07002548 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002549 }
Sathya Perlad3791422012-09-28 04:39:44 +00002550
2551 dev_warn(dev, "MSIx enable failed\n");
Sathya Perla92bf14a2013-08-27 16:57:32 +05302552
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002553 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2554 if (!be_physfn(adapter))
2555 return status;
2556 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002557done:
Sathya Perla92bf14a2013-08-27 16:57:32 +05302558 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2559 adapter->num_msix_roce_vec = num_vec / 2;
2560 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2561 adapter->num_msix_roce_vec);
2562 }
2563
2564 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2565
2566 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2567 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002568 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002569}
2570
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002571static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002572 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002573{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302574 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575}
2576
/* Request one IRQ per event queue. On failure, free the IRQs requested
 * so far (in reverse) and disable MSI-x entirely.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-queue IRQ name, e.g. "eth0-q0", shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs of queues [0, i-1] that were registered */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2600
2601static int be_irq_register(struct be_adapter *adapter)
2602{
2603 struct net_device *netdev = adapter->netdev;
2604 int status;
2605
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002606 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002607 status = be_msix_register(adapter);
2608 if (status == 0)
2609 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002610 /* INTx is not supported for VF */
2611 if (!be_physfn(adapter))
2612 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002613 }
2614
Sathya Perlae49cc342012-11-27 19:50:02 +00002615 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002616 netdev->irq = adapter->pdev->irq;
2617 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002618 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002619 if (status) {
2620 dev_err(&adapter->pdev->dev,
2621 "INTx request IRQ failed - err %d\n", status);
2622 return status;
2623 }
2624done:
2625 adapter->isr_registered = true;
2626 return 0;
2627}
2628
2629static void be_irq_unregister(struct be_adapter *adapter)
2630{
2631 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002632 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002633 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002634
2635 if (!adapter->isr_registered)
2636 return;
2637
2638 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002639 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002640 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002641 goto done;
2642 }
2643
2644 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002645 for_all_evt_queues(adapter, eqo, i)
2646 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002647
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002648done:
2649 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002650}
2651
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002652static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002653{
2654 struct be_queue_info *q;
2655 struct be_rx_obj *rxo;
2656 int i;
2657
2658 for_all_rx_queues(adapter, rxo, i) {
2659 q = &rxo->q;
2660 if (q->created) {
2661 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002662 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002663 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002664 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002665 }
2666}
2667
Sathya Perla889cd4b2010-05-30 23:33:45 +00002668static int be_close(struct net_device *netdev)
2669{
2670 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002671 struct be_eq_obj *eqo;
2672 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002673
Parav Pandit045508a2012-03-26 14:27:13 +00002674 be_roce_dev_close(adapter);
2675
Ivan Veceradff345c52013-11-27 08:59:32 +01002676 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2677 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00002678 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302679 be_disable_busy_poll(eqo);
2680 }
David S. Miller71237b62013-11-28 18:53:36 -05002681 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00002682 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002683
2684 be_async_mcc_disable(adapter);
2685
2686 /* Wait for all pending tx completions to arrive so that
2687 * all tx skbs are freed.
2688 */
Sathya Perlafba87552013-05-08 02:05:50 +00002689 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302690 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002691
2692 be_rx_qs_destroy(adapter);
2693
Ajit Khaparded11a3472013-11-18 10:44:37 -06002694 for (i = 1; i < (adapter->uc_macs + 1); i++)
2695 be_cmd_pmac_del(adapter, adapter->if_handle,
2696 adapter->pmac_id[i], 0);
2697 adapter->uc_macs = 0;
2698
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002699 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002700 if (msix_enabled(adapter))
2701 synchronize_irq(be_msix_vec_get(adapter, eqo));
2702 else
2703 synchronize_irq(netdev->irq);
2704 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002705 }
2706
Sathya Perla889cd4b2010-05-30 23:33:45 +00002707 be_irq_unregister(adapter);
2708
Sathya Perla482c9e72011-06-29 23:33:17 +00002709 return 0;
2710}
2711
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002712static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002713{
2714 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002715 int rc, i, j;
2716 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002717
2718 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002719 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2720 sizeof(struct be_eth_rx_d));
2721 if (rc)
2722 return rc;
2723 }
2724
2725 /* The FW would like the default RXQ to be created first */
2726 rxo = default_rxo(adapter);
2727 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2728 adapter->if_handle, false, &rxo->rss_id);
2729 if (rc)
2730 return rc;
2731
2732 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002733 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002734 rx_frag_size, adapter->if_handle,
2735 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002736 if (rc)
2737 return rc;
2738 }
2739
2740 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002741 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2742 for_all_rss_queues(adapter, rxo, i) {
2743 if ((j + i) >= 128)
2744 break;
2745 rsstable[j + i] = rxo->rss_id;
2746 }
2747 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002748 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2749 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2750
2751 if (!BEx_chip(adapter))
2752 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2753 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302754 } else {
2755 /* Disable RSS, if only default RX Q is created */
2756 adapter->rss_flags = RSS_ENABLE_NONE;
2757 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002758
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302759 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2760 128);
2761 if (rc) {
2762 adapter->rss_flags = RSS_ENABLE_NONE;
2763 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002764 }
2765
2766 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002767 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002768 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002769 return 0;
2770}
2771
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002772static int be_open(struct net_device *netdev)
2773{
2774 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002775 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002776 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002777 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002778 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002779 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002780
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002782 if (status)
2783 goto err;
2784
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002785 status = be_irq_register(adapter);
2786 if (status)
2787 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002788
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002789 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002790 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002791
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002792 for_all_tx_queues(adapter, txo, i)
2793 be_cq_notify(adapter, txo->cq.id, true, 0);
2794
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002795 be_async_mcc_enable(adapter);
2796
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002797 for_all_evt_queues(adapter, eqo, i) {
2798 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302799 be_enable_busy_poll(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002800 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2801 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002802 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002803
Sathya Perla323ff712012-09-28 04:39:43 +00002804 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002805 if (!status)
2806 be_link_status_update(adapter, link_status);
2807
Sathya Perlafba87552013-05-08 02:05:50 +00002808 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002809 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002810 return 0;
2811err:
2812 be_close(adapter->netdev);
2813 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002814}
2815
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002816static int be_setup_wol(struct be_adapter *adapter, bool enable)
2817{
2818 struct be_dma_mem cmd;
2819 int status = 0;
2820 u8 mac[ETH_ALEN];
2821
2822 memset(mac, 0, ETH_ALEN);
2823
2824 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002825 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2826 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002827 if (cmd.va == NULL)
2828 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002829
2830 if (enable) {
2831 status = pci_write_config_dword(adapter->pdev,
2832 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2833 if (status) {
2834 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002835 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002836 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2837 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002838 return status;
2839 }
2840 status = be_cmd_enable_magic_wol(adapter,
2841 adapter->netdev->dev_addr, &cmd);
2842 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2843 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2844 } else {
2845 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2846 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2847 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2848 }
2849
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002850 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002851 return status;
2852}
2853
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002854/*
2855 * Generate a seed MAC address from the PF MAC Address using jhash.
2856 * MAC Address for VFs are assigned incrementally starting from the seed.
2857 * These addresses are programmed in the ASIC by the PF and the VF driver
2858 * queries for the MAC address during its probe.
2859 */
Sathya Perla4c876612013-02-03 20:30:11 +00002860static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002861{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002862 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002863 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002864 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002865 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002866
2867 be_vf_eth_addr_generate(adapter, mac);
2868
Sathya Perla11ac75e2011-12-13 00:58:50 +00002869 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302870 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002871 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002872 vf_cfg->if_handle,
2873 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302874 else
2875 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2876 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002877
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002878 if (status)
2879 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002880 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002881 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002882 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002883
2884 mac[5] += 1;
2885 }
2886 return status;
2887}
2888
Sathya Perla4c876612013-02-03 20:30:11 +00002889static int be_vfs_mac_query(struct be_adapter *adapter)
2890{
2891 int status, vf;
2892 u8 mac[ETH_ALEN];
2893 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002894
2895 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302896 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2897 mac, vf_cfg->if_handle,
2898 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002899 if (status)
2900 return status;
2901 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2902 }
2903 return 0;
2904}
2905
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002906static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002907{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002908 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002909 u32 vf;
2910
Sathya Perla257a3fe2013-06-14 15:54:51 +05302911 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002912 dev_warn(&adapter->pdev->dev,
2913 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002914 goto done;
2915 }
2916
Sathya Perlab4c1df92013-05-08 02:05:47 +00002917 pci_disable_sriov(adapter->pdev);
2918
Sathya Perla11ac75e2011-12-13 00:58:50 +00002919 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302920 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002921 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2922 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302923 else
2924 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2925 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002926
Sathya Perla11ac75e2011-12-13 00:58:50 +00002927 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2928 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002929done:
2930 kfree(adapter->vf_cfg);
2931 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002932}
2933
Sathya Perla77071332013-08-27 16:57:34 +05302934static void be_clear_queues(struct be_adapter *adapter)
2935{
2936 be_mcc_queues_destroy(adapter);
2937 be_rx_cqs_destroy(adapter);
2938 be_tx_queues_destroy(adapter);
2939 be_evt_queues_destroy(adapter);
2940}
2941
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302942static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002943{
Sathya Perla191eb752012-02-23 18:50:13 +00002944 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2945 cancel_delayed_work_sync(&adapter->work);
2946 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2947 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302948}
2949
Somnath Koturb05004a2013-12-05 12:08:16 +05302950static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302951{
2952 int i;
2953
Somnath Koturb05004a2013-12-05 12:08:16 +05302954 if (adapter->pmac_id) {
2955 for (i = 0; i < (adapter->uc_macs + 1); i++)
2956 be_cmd_pmac_del(adapter, adapter->if_handle,
2957 adapter->pmac_id[i], 0);
2958 adapter->uc_macs = 0;
2959
2960 kfree(adapter->pmac_id);
2961 adapter->pmac_id = NULL;
2962 }
2963}
2964
/* Undo be_setup(): stop the worker, clear VFs, MACs, the interface, all
 * queues, and finally MSI-x. Ordering mirrors setup in reverse.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2982
Sathya Perla4c876612013-02-03 20:30:11 +00002983static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002984{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302985 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002986 struct be_vf_cfg *vf_cfg;
2987 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002988 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002989
Sathya Perla4c876612013-02-03 20:30:11 +00002990 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2991 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002992
Sathya Perla4c876612013-02-03 20:30:11 +00002993 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302994 if (!BE3_chip(adapter)) {
2995 status = be_cmd_get_profile_config(adapter, &res,
2996 vf + 1);
2997 if (!status)
2998 cap_flags = res.if_cap_flags;
2999 }
Sathya Perla4c876612013-02-03 20:30:11 +00003000
3001 /* If a FW profile exists, then cap_flags are updated */
3002 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3003 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3004 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3005 &vf_cfg->if_handle, vf + 1);
3006 if (status)
3007 goto err;
3008 }
3009err:
3010 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003011}
3012
Sathya Perla39f1d942012-05-08 19:41:24 +00003013static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003014{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003015 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003016 int vf;
3017
Sathya Perla39f1d942012-05-08 19:41:24 +00003018 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3019 GFP_KERNEL);
3020 if (!adapter->vf_cfg)
3021 return -ENOMEM;
3022
Sathya Perla11ac75e2011-12-13 00:58:50 +00003023 for_all_vfs(adapter, vf_cfg, vf) {
3024 vf_cfg->if_handle = -1;
3025 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003026 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003027 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003028}
3029
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003030static int be_vf_setup(struct be_adapter *adapter)
3031{
Sathya Perla4c876612013-02-03 20:30:11 +00003032 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303033 struct be_vf_cfg *vf_cfg;
3034 int status, old_vfs, vf;
Sathya Perla04a06022013-07-23 15:25:00 +05303035 u32 privileges;
Somnath Koturc5022242014-03-03 14:24:20 +05303036 u16 lnk_speed;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003037
Sathya Perla257a3fe2013-06-14 15:54:51 +05303038 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00003039 if (old_vfs) {
3040 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3041 if (old_vfs != num_vfs)
3042 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3043 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00003044 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303045 if (num_vfs > be_max_vfs(adapter))
Sathya Perla4c876612013-02-03 20:30:11 +00003046 dev_info(dev, "Device supports %d VFs and not %d\n",
Sathya Perla92bf14a2013-08-27 16:57:32 +05303047 be_max_vfs(adapter), num_vfs);
3048 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
Sathya Perlab4c1df92013-05-08 02:05:47 +00003049 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00003050 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003051 }
3052
3053 status = be_vf_setup_init(adapter);
3054 if (status)
3055 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003056
Sathya Perla4c876612013-02-03 20:30:11 +00003057 if (old_vfs) {
3058 for_all_vfs(adapter, vf_cfg, vf) {
3059 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3060 if (status)
3061 goto err;
3062 }
3063 } else {
3064 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003065 if (status)
3066 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003067 }
3068
Sathya Perla4c876612013-02-03 20:30:11 +00003069 if (old_vfs) {
3070 status = be_vfs_mac_query(adapter);
3071 if (status)
3072 goto err;
3073 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00003074 status = be_vf_eth_addr_config(adapter);
3075 if (status)
3076 goto err;
3077 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003078
Sathya Perla11ac75e2011-12-13 00:58:50 +00003079 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303080 /* Allow VFs to programs MAC/VLAN filters */
3081 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3082 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3083 status = be_cmd_set_fn_privileges(adapter,
3084 privileges |
3085 BE_PRIV_FILTMGMT,
3086 vf + 1);
3087 if (!status)
3088 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3089 vf);
3090 }
3091
Sathya Perla4c876612013-02-03 20:30:11 +00003092 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3093 * Allow full available bandwidth
3094 */
3095 if (BE3_chip(adapter) && !old_vfs)
3096 be_cmd_set_qos(adapter, 1000, vf+1);
3097
3098 status = be_cmd_link_status_query(adapter, &lnk_speed,
3099 NULL, vf + 1);
3100 if (!status)
3101 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003102
Vasundhara Volam05998632013-10-01 15:59:59 +05303103 if (!old_vfs)
3104 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003105 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003106
3107 if (!old_vfs) {
3108 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3109 if (status) {
3110 dev_err(dev, "SRIOV enable failed\n");
3111 adapter->num_vfs = 0;
3112 goto err;
3113 }
3114 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003115 return 0;
3116err:
Sathya Perla4c876612013-02-03 20:30:11 +00003117 dev_err(dev, "VF setup failed\n");
3118 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003119 return status;
3120}
3121
Sathya Perla92bf14a2013-08-27 16:57:32 +05303122/* On BE2/BE3 FW does not suggest the supported limits */
3123static void BEx_get_resources(struct be_adapter *adapter,
3124 struct be_resources *res)
3125{
3126 struct pci_dev *pdev = adapter->pdev;
3127 bool use_sriov = false;
Suresh Reddye3dc8672014-01-06 13:02:25 +05303128 int max_vfs;
3129
3130 max_vfs = pci_sriov_get_totalvfs(pdev);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303131
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303132 if (BE3_chip(adapter) && sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303133 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303134 use_sriov = res->max_vfs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303135 }
3136
3137 if (be_physfn(adapter))
3138 res->max_uc_mac = BE_UC_PMAC_COUNT;
3139 else
3140 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3141
3142 if (adapter->function_mode & FLEX10_MODE)
3143 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde1aa96732013-09-27 15:18:16 -05003144 else if (adapter->function_mode & UMC_ENABLED)
3145 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303146 else
3147 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3148 res->max_mcast_mac = BE_MAX_MC;
3149
Vasundhara Volam30f3fe42013-10-01 15:59:58 +05303150 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303151 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
Vasundhara Volam30f3fe42013-10-01 15:59:58 +05303152 !be_physfn(adapter) || (adapter->port_num > 1))
Sathya Perla92bf14a2013-08-27 16:57:32 +05303153 res->max_tx_qs = 1;
3154 else
3155 res->max_tx_qs = BE3_MAX_TX_QS;
3156
3157 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3158 !use_sriov && be_physfn(adapter))
3159 res->max_rss_qs = (adapter->be3_native) ?
3160 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3161 res->max_rx_qs = res->max_rss_qs + 1;
3162
Suresh Reddye3dc8672014-01-06 13:02:25 +05303163 if (be_physfn(adapter))
3164 res->max_evt_qs = (max_vfs > 0) ?
3165 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3166 else
3167 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303168
3169 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3170 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3171 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3172}
3173
Sathya Perla30128032011-11-10 19:17:57 +00003174static void be_setup_init(struct be_adapter *adapter)
3175{
3176 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003177 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003178 adapter->if_handle = -1;
3179 adapter->be3_native = false;
3180 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003181 if (be_physfn(adapter))
3182 adapter->cmd_privileges = MAX_PRIVILEGES;
3183 else
3184 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003185}
3186
Sathya Perla92bf14a2013-08-27 16:57:32 +05303187static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003188{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303189 struct device *dev = &adapter->pdev->dev;
3190 struct be_resources res = {0};
3191 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003192
Sathya Perla92bf14a2013-08-27 16:57:32 +05303193 if (BEx_chip(adapter)) {
3194 BEx_get_resources(adapter, &res);
3195 adapter->res = res;
3196 }
3197
Sathya Perla92bf14a2013-08-27 16:57:32 +05303198 /* For Lancer, SH etc read per-function resource limits from FW.
3199 * GET_FUNC_CONFIG returns per function guaranteed limits.
3200 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3201 */
Sathya Perla4c876612013-02-03 20:30:11 +00003202 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303203 status = be_cmd_get_func_config(adapter, &res);
3204 if (status)
3205 return status;
3206
3207 /* If RoCE may be enabled stash away half the EQs for RoCE */
3208 if (be_roce_supported(adapter))
3209 res.max_evt_qs /= 2;
3210 adapter->res = res;
3211
3212 if (be_physfn(adapter)) {
3213 status = be_cmd_get_profile_config(adapter, &res, 0);
3214 if (status)
3215 return status;
3216 adapter->res.max_vfs = res.max_vfs;
3217 }
3218
3219 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3220 be_max_txqs(adapter), be_max_rxqs(adapter),
3221 be_max_rss(adapter), be_max_eqs(adapter),
3222 be_max_vfs(adapter));
3223 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3224 be_max_uc(adapter), be_max_mc(adapter),
3225 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003226 }
3227
Sathya Perla92bf14a2013-08-27 16:57:32 +05303228 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003229}
3230
Sathya Perla39f1d942012-05-08 19:41:24 +00003231/* Routine to query per function resource limits */
3232static int be_get_config(struct be_adapter *adapter)
3233{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303234 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003235 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003236
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003237 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3238 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003239 &adapter->function_caps,
3240 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003241 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303242 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003243
Vasundhara Volam542963b2014-01-15 13:23:33 +05303244 if (be_physfn(adapter)) {
3245 status = be_cmd_get_active_profile(adapter, &profile_id);
3246 if (!status)
3247 dev_info(&adapter->pdev->dev,
3248 "Using profile 0x%x\n", profile_id);
3249 }
3250
Sathya Perla92bf14a2013-08-27 16:57:32 +05303251 status = be_get_resources(adapter);
3252 if (status)
3253 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003254
3255 /* primary mac needs 1 pmac entry */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303256 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3257 GFP_KERNEL);
3258 if (!adapter->pmac_id)
3259 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003260
Sathya Perla92bf14a2013-08-27 16:57:32 +05303261 /* Sanitize cfg_num_qs based on HW and platform limits */
3262 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3263
3264 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003265}
3266
/* Program the interface's MAC address.
 * If netdev->dev_addr is still all-zero (first-time init), read the
 * factory (permanent) MAC from FW and adopt it; otherwise re-program
 * whatever address is already configured.
 * Returns 0 on success or a FW-command error code.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3290
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303291static void be_schedule_worker(struct be_adapter *adapter)
3292{
3293 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3294 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3295}
3296
/* Create all HW queues and publish the usable queue counts to the stack.
 * Creation order matters: event queues come first because TX/RX/MCC
 * completion queues are bound to them.
 * On any failure, logs once and returns the error (caller unwinds).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Tell the net core how many RX/TX queues are actually in use */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3331
/* Re-create all queues after a configuration change (e.g. channel count).
 * Closes the interface if it is running, cancels the worker, tears the
 * queues down, rebuilds them and re-opens the interface.
 * Returns 0 on success or the first error encountered.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-x only if it was disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3367
/* Full adapter bring-up: query config, enable MSI-x, create the FW
 * interface object and all queues, program the MAC, restore VLAN/RX-mode
 * and flow-control settings, set up SR-IOV VFs if requested, and start
 * the periodic worker. The sequence is order-dependent; on any error the
 * partial setup is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the interface capabilities the HW actually reports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* BE2 FW older than 4.0 has known interrupt problems; warn loudly */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-apply VLAN filters that may have been lost across a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-program flow control only if HW state differs from ours */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3448
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: interrupts may be unavailable, so ring every
 * event queue's doorbell (re-arm) and schedule its NAPI context so any
 * pending completions are drained by polling.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* dropped the redundant bare "return;" at the end of this
	 * void function
	 */
}
#endif
3464
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte marker that precedes a flash section directory inside a UFI
 * image; stored as two 16-byte halves to mirror the on-flash layout.
 */
static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003467
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003468static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003469 const u8 *p, u32 img_start, int image_size,
3470 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003471{
3472 u32 crc_offset;
3473 u8 flashed_crc[4];
3474 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003475
3476 crc_offset = hdr_size + img_start + image_size - 4;
3477
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003478 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003479
3480 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003481 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003482 if (status) {
3483 dev_err(&adapter->pdev->dev,
3484 "could not get crc from flash, not flashing redboot\n");
3485 return false;
3486 }
3487
3488 /*update redboot only if crc does not match*/
3489 if (!memcmp(flashed_crc, p, 4))
3490 return false;
3491 else
3492 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003493}
3494
Sathya Perla306f1342011-08-02 19:57:45 +00003495static bool phy_flashing_required(struct be_adapter *adapter)
3496{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003497 return (adapter->phy.phy_type == TN_8022 &&
3498 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003499}
3500
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003501static bool is_comp_in_ufi(struct be_adapter *adapter,
3502 struct flash_section_info *fsec, int type)
3503{
3504 int i = 0, img_type = 0;
3505 struct flash_section_info_g2 *fsec_g2 = NULL;
3506
Sathya Perlaca34fe32012-11-06 17:48:56 +00003507 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003508 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3509
3510 for (i = 0; i < MAX_FLASH_COMP; i++) {
3511 if (fsec_g2)
3512 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3513 else
3514 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3515
3516 if (img_type == type)
3517 return true;
3518 }
3519 return false;
3520
3521}
3522
Jingoo Han4188e7d2013-08-05 18:02:02 +09003523static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003524 int header_size,
3525 const struct firmware *fw)
3526{
3527 struct flash_section_info *fsec = NULL;
3528 const u8 *p = fw->data;
3529
3530 p += header_size;
3531 while (p < (fw->data + fw->size)) {
3532 fsec = (struct flash_section_info *)p;
3533 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3534 return fsec;
3535 p += 32;
3536 }
3537 return NULL;
3538}
3539
/* Write one image to the flash ROM in chunks of at most 32KB.
 * Every chunk except the last is sent with a SAVE opcode (staged by the
 * adapter); the final chunk uses a FLASH opcode which commits the image.
 * PHY firmware has its own SAVE/FLASH opcode pair. An ILLEGAL_IOCTL_REQ
 * on PHY firmware is tolerated (treated as "skip this image").
 * Returns 0 on success or the FW-command error code.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			/* PHY image rejection is non-fatal: just stop here */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3580
/* For BE2, BE3 and BE3-R */
/* Flash a BE2/BE3(-R) UFI: walk a fixed table of known flash components
 * (firmware, boot code, option ROMs, NCSI, PHY FW), skip entries that
 * are absent from the UFI or not applicable to this adapter, bounds-check
 * each image against the file size, and flash it via be_flash().
 * Returns 0 on success, -1 on a corrupt UFI, or a FW error code.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Per-component flash layout table for gen3 (BE3) adapters:
	 * { flash offset, operation type, max size, UFI image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Same layout table for gen2 (BE2) adapters (no NCSI / PHY FW) */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires base firmware >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
		    !phy_flashing_required(adapter))
				continue;

		/* Boot code is only written if its CRC differs from flash */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds check: image must lie fully inside the FW file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3690
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003691static int be_flash_skyhawk(struct be_adapter *adapter,
3692 const struct firmware *fw,
3693 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003694{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003695 int status = 0, i, filehdr_size = 0;
3696 int img_offset, img_size, img_optype, redboot;
3697 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3698 const u8 *p = fw->data;
3699 struct flash_section_info *fsec = NULL;
3700
3701 filehdr_size = sizeof(struct flash_file_hdr_g3);
3702 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3703 if (!fsec) {
3704 dev_err(&adapter->pdev->dev,
3705 "Invalid Cookie. UFI corrupted ?\n");
3706 return -1;
3707 }
3708
3709 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3710 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3711 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3712
3713 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3714 case IMAGE_FIRMWARE_iSCSI:
3715 img_optype = OPTYPE_ISCSI_ACTIVE;
3716 break;
3717 case IMAGE_BOOT_CODE:
3718 img_optype = OPTYPE_REDBOOT;
3719 break;
3720 case IMAGE_OPTION_ROM_ISCSI:
3721 img_optype = OPTYPE_BIOS;
3722 break;
3723 case IMAGE_OPTION_ROM_PXE:
3724 img_optype = OPTYPE_PXE_BIOS;
3725 break;
3726 case IMAGE_OPTION_ROM_FCoE:
3727 img_optype = OPTYPE_FCOE_BIOS;
3728 break;
3729 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3730 img_optype = OPTYPE_ISCSI_BACKUP;
3731 break;
3732 case IMAGE_NCSI:
3733 img_optype = OPTYPE_NCSI_FW;
3734 break;
3735 default:
3736 continue;
3737 }
3738
3739 if (img_optype == OPTYPE_REDBOOT) {
3740 redboot = be_flash_redboot(adapter, fw->data,
3741 img_offset, img_size,
3742 filehdr_size + img_hdrs_size);
3743 if (!redboot)
3744 continue;
3745 }
3746
3747 p = fw->data;
3748 p += filehdr_size + img_offset + img_hdrs_size;
3749 if (p + img_size > fw->data + fw->size)
3750 return -1;
3751
3752 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3753 if (status) {
3754 dev_err(&adapter->pdev->dev,
3755 "Flashing section type %d failed.\n",
3756 fsec->fsec_entry[i].type);
3757 return status;
3758 }
3759 }
3760 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003761}
3762
/* Download a firmware image to a Lancer adapter.
 * The image is streamed to the "/prg" object in 32KB chunks through a
 * single DMA buffer, then committed with a zero-length write. Depending
 * on change_status, the adapter may be reset here to activate the new
 * FW, or a host reboot may be required.
 * Returns 0 on success or a negative/FW error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW objects are written as whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	/* Chunk payload lives just past the command header in the buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW reports how much was actually consumed; advance by that */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
			dev_err(&adapter->pdev->dev,
				"System reboot required for new FW"
				" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3860
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
/* Classify the UFI image from its file header and check it against the
 * adapter generation: TYPE4 = Skyhawk, TYPE3R = BE3-R, TYPE3 = BE3,
 * TYPE2 = BE2. The first character of fhdr->build encodes the target
 * generation. Returns the UFI_TYPE* value or -1 on a mismatch.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
3886
/* Download firmware to a non-Lancer (BEx/Skyhawk) adapter.
 * Allocates one DMA buffer for flashrom commands, classifies the UFI
 * image, and dispatches to the generation-specific flashing routine.
 * A BE3 (non-R) UFI is refused on BE3-R hardware.
 * Returns 0 on success, -ENOMEM, -1 on an incompatible UFI, or a FW
 * error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Flash only the image (imageid == 1) that matches this adapter */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Legacy BE2 UFIs have no per-image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3955
3956int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3957{
3958 const struct firmware *fw;
3959 int status;
3960
3961 if (!netif_running(adapter->netdev)) {
3962 dev_err(&adapter->pdev->dev,
3963 "Firmware load not allowed (interface is down)\n");
3964 return -1;
3965 }
3966
3967 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3968 if (status)
3969 goto fw_exit;
3970
3971 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3972
3973 if (lancer_chip(adapter))
3974 status = lancer_fw_download(adapter, fw);
3975 else
3976 status = be_fw_download(adapter, fw);
3977
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003978 if (!status)
3979 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3980 adapter->fw_on_flash);
3981
Ajit Khaparde84517482009-09-04 03:12:16 +00003982fw_exit:
3983 release_firmware(fw);
3984 return status;
3985}
3986
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003987static int be_ndo_bridge_setlink(struct net_device *dev,
3988 struct nlmsghdr *nlh)
3989{
3990 struct be_adapter *adapter = netdev_priv(dev);
3991 struct nlattr *attr, *br_spec;
3992 int rem;
3993 int status = 0;
3994 u16 mode = 0;
3995
3996 if (!sriov_enabled(adapter))
3997 return -EOPNOTSUPP;
3998
3999 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4000
4001 nla_for_each_nested(attr, br_spec, rem) {
4002 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4003 continue;
4004
4005 mode = nla_get_u16(attr);
4006 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4007 return -EINVAL;
4008
4009 status = be_cmd_set_hsw_config(adapter, 0, 0,
4010 adapter->if_handle,
4011 mode == BRIDGE_MODE_VEPA ?
4012 PORT_FWD_TYPE_VEPA :
4013 PORT_FWD_TYPE_VEB);
4014 if (status)
4015 goto err;
4016
4017 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4018 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4019
4020 return status;
4021 }
4022err:
4023 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4024 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4025
4026 return status;
4027}
4028
/* ndo_bridge_getlink handler: report the eSwitch port-forwarding mode.
 * BE and Lancer chips only support VEB; other chips are queried via FW.
 * Silently reports nothing (returns 0) when SR-IOV is off or the FW
 * query fails.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				    struct net_device *dev,
				    u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
4054
/* net_device_ops for all benet adapters: standard open/close/xmit and
 * address handling, SR-IOV VF management hooks, bridge (eSwitch) mode
 * get/set, optional netpoll, and busy-poll support.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
4079
/* Initialize the net_device: advertise offload features, set flags,
 * GSO limit, and install the driver's netdev/ethtool ops.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently enabled features: all toggleable ones plus VLAN RX
	 * offloads, which are always on
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW supports unicast address filtering */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO size so segment + header fits the HW limit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4106
4107static void be_unmap_pci_bars(struct be_adapter *adapter)
4108{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004109 if (adapter->csr)
4110 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004111 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004112 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004113}
4114
/* Return the PCI BAR number that holds the doorbell region:
 * BAR 0 on Lancer chips and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4122
4123static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004124{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004125 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004126 adapter->roce_db.size = 4096;
4127 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4128 db_bar(adapter));
4129 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4130 db_bar(adapter));
4131 }
Parav Pandit045508a2012-03-26 14:27:13 +00004132 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004133}
4134
4135static int be_map_pci_bars(struct be_adapter *adapter)
4136{
4137 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004138
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004139 if (BEx_chip(adapter) && be_physfn(adapter)) {
4140 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4141 if (adapter->csr == NULL)
4142 return -ENOMEM;
4143 }
4144
Sathya Perlace66f782012-11-06 17:48:58 +00004145 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004146 if (addr == NULL)
4147 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004148 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004149
4150 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004151 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004152
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004153pci_map_err:
4154 be_unmap_pci_bars(adapter);
4155 return -ENOMEM;
4156}
4157
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004158static void be_ctrl_cleanup(struct be_adapter *adapter)
4159{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004160 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004161
4162 be_unmap_pci_bars(adapter);
4163
4164 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004165 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4166 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004167
Sathya Perla5b8821b2011-08-02 19:57:44 +00004168 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004169 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004170 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4171 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004172}
4173
/* One-time control-path init: read SLI identity from PCI config space,
 * map BARs, allocate the (16-byte aligned) mailbox and rx-filter DMA
 * buffers, and init the locks/completion used for FW commands.
 * On failure, resources acquired so far are released via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode chip family and PF/VF identity from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space so it can be restored after an error/reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4232
4233static void be_stats_cleanup(struct be_adapter *adapter)
4234{
Sathya Perla3abcded2010-10-03 22:12:27 -07004235 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004236
4237 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004238 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4239 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004240}
4241
4242static int be_stats_init(struct be_adapter *adapter)
4243{
Sathya Perla3abcded2010-10-03 22:12:27 -07004244 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004245
Sathya Perlaca34fe32012-11-06 17:48:56 +00004246 if (lancer_chip(adapter))
4247 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4248 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004249 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004250 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004251 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004252 else
4253 /* ALL non-BE ASICs */
4254 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004255
Joe Perchesede23fa2013-08-26 22:45:23 -07004256 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4257 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004258 if (cmd->va == NULL)
4259 return -1;
4260 return 0;
4261}
4262
/* PCI remove handler: tear down in the reverse order of be_probe() —
 * detach RoCE, stop recovery work, unregister the netdev, release
 * driver/FW resources, then disable the PCI device.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the recovery worker before pulling resources from under it */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4293
/* Fetch initial adapter configuration from FW: controller attributes,
 * die-temperature polling frequency, message level (BE-x only), and
 * the default queue count. Returns 0 or a negative FW-command status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* derive the netif message level from the FW log level (BE-x only) */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4314
/* Attempt to recover a Lancer chip after an error: wait for the chip
 * to become ready, tear the function down, clear error state, and set
 * it up again (re-opening the interface if it was running).
 * Returns 0 on success or a negative status (-EAGAIN means FW resource
 * provisioning is still in progress and recovery should be retried).
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4351
/* Periodic (1s) worker that detects adapter errors and, on Lancer
 * chips, detaches the netdev and runs the recovery sequence.
 * Reschedules itself unless recovery failed with a fatal status.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device
		 * while recovery runs
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4378
/* Periodic (1s) housekeeping worker: reaps MCC completions, fires the
 * stats command, polls die temperature, replenishes starved RX queues,
 * and updates EQ delay; always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF-only: poll die temperature every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4421
Sathya Perla257a3fe2013-06-14 15:54:51 +05304422/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004423static bool be_reset_required(struct be_adapter *adapter)
4424{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304425 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004426}
4427
Sathya Perlad3791422012-09-28 04:39:44 +00004428static char *mc_name(struct be_adapter *adapter)
4429{
4430 if (adapter->function_mode & FLEX10_MODE)
4431 return "FLEX10";
4432 else if (adapter->function_mode & VNIC_MODE)
4433 return "vNIC";
4434 else if (adapter->function_mode & UMC_ENABLED)
4435 return "UMC";
4436 else
4437 return "";
4438}
4439
/* Return "PF" for a physical function, "VF" for a virtual one */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4444
/* PCI probe handler: enable the device, allocate the netdev, set up
 * DMA masks, initialize the control path, sync with FW, optionally
 * FLR the function, allocate stats, configure the adapter, and
 * register the netdev. Errors unwind through the labels at the end
 * in reverse acquisition order. Returns 0 or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4566
/* PM suspend handler: arm WoL if enabled, quiesce interrupts and the
 * recovery worker, close and clear the adapter, then power the PCI
 * device down into the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4591
/* PM resume handler: re-enable the PCI device, wait for FW, re-init
 * the FW command path, redo adapter setup, reopen the interface if it
 * was running, restart the recovery worker and disarm WoL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* wait for FW before issuing any commands */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4633
/*
 * An FLR will stop BE from DMAing any data.
 * Shutdown handler: stop the workers, detach the netdev, reset the
 * function via FW (the FLR above), and disable the PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4653
/* EEH/AER error-detected callback: on first notification, quiesce the
 * driver (stop recovery worker, detach/close netdev, clear resources),
 * then disable the device and request a slot reset — or disconnect if
 * the failure is permanent.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* quiesce only once even if the callback fires repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4692
/* EEH slot-reset callback: re-enable the device, restore its config
 * space, wait for FW readiness, and clear error state. Returns
 * RECOVERED on success so be_eeh_resume() runs next, else DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4719
/* EEH resume callback: after a successful slot reset, reset and
 * re-init the function via FW, redo adapter setup, reopen the
 * interface if it was running, and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4756
/* PCI error-recovery (EEH/AER) callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4762
/* PCI driver registration: entry points for probe/remove, power
 * management, shutdown, and EEH error recovery.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4773
4774static int __init be_init_module(void)
4775{
Joe Perches8e95a202009-12-03 07:58:21 +00004776 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4777 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004778 printk(KERN_WARNING DRV_NAME
4779 " : Module param rx_frag_size must be 2048/4096/8192."
4780 " Using 2048\n");
4781 rx_frag_size = 2048;
4782 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004783
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004784 return pci_register_driver(&be_driver);
4785}
4786module_init(be_init_module);
4787
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);