blob: a9da6f94e2fd5e2a5c9eaabed0f89246145bcf56 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
/* Module parameters: read-only via sysfs (S_IRUGO), set at load time only */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the HW (default 2KB) */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Sathya Perla6b7c5b92009-03-11 23:32:03 -070041static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable block name for each bit of the low
 * unrecoverable-error status register (presumably indexed by bit
 * position — the consuming code is not visible in this chunk).
 * Trailing spaces in some entries are part of the original strings and
 * are preserved verbatim.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable block name for each bit of the
 * high unrecoverable-error status register (presumably indexed by bit
 * position — the consuming code is not visible in this chunk).
 * Reserved/undocumented bits are reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000128 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 mem->va = NULL;
132 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000147 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 return 0;
149}
150
Somnath Kotur68c45a22013-03-14 02:42:07 +0000151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168}
169
Somnath Kotur68c45a22013-03-14 02:42:07 +0000170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_RQ_RING_ID_MASK;
190 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000196static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198{
199 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000202
203 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205}
206
Sathya Perla8788fdc2009-07-27 22:52:03 +0000207static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208 bool arm, bool clear_int, u16 num_popped)
209{
210 u32 val = 0;
211 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000212 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
213 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
/* .ndo_set_mac_address handler.
 * Programs the new MAC via a FW PMAC_ADD cmd, deletes the previously
 * programmed MAC, then confirms with the FW which MAC is actually
 * active before committing it to netdev->dev_addr.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * when the FW refused the change, or a FW cmd error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000345 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000377 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000378 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000379 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
Sathya Perlaca34fe32012-11-06 17:48:56 +0000389static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
Ajit Khaparde61000862013-10-03 16:16:33 -0500435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Ajit Khaparde461ae372013-10-03 16:16:50 -0500479 if (be_roce_supported(adapter)) {
480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500487}
488
/* Parse a Lancer pport-stats cmd response into adapter->drv_stats.
 * Fields with a _lo suffix appear to be the low dword of a wider HW
 * counter (judging by the naming — TODO confirm against the FW spec).
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* response is little-endian; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan filter drops are reported separately; fold them */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000527
Sathya Perla09c1c682011-08-22 19:41:53 +0000528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
Jingoo Han4188e7d2013-08-05 18:02:02 +0900540static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000554void be_parse_stats(struct be_adapter *adapter)
555{
Ajit Khaparde61000862013-10-03 16:16:33 -0500556 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000557 struct be_rx_obj *rxo;
558 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000560
Sathya Perlaca34fe32012-11-06 17:48:56 +0000561 if (lancer_chip(adapter)) {
562 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000563 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000564 if (BE2_chip(adapter))
565 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500566 else if (BE3_chip(adapter))
567 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000568 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500569 else
570 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000571
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000573 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000574 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000576 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000577 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000578}
579
/* .ndo_get_stats64 handler: aggregate the per-queue SW packet/byte
 * counters and the FW-derived error counters cached in
 * adapter->drv_stats into @stats.  Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* seqcount retry loop: take a consistent snapshot of the
		 * 64-bit pkts/bytes pair even where 64-bit loads are not
		 * atomic
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the tx side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
/* Reflect the adapter's link state onto the net_device carrier state.
 * @adapter:     driver private state
 * @link_status: link status word from FW; only LINK_STATUS_MASK bits used
 *
 * On the very first call (BE_FLAGS_LINK_STATUS_INIT not yet set) the
 * carrier is forced off before the real status is applied, so the stack
 * starts from a known-down state.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* One-time init: start with carrier off until FW reports a state */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
660
/* Account one transmit request in the per-TX-queue stats.
 * @txo:      TX queue object whose stats are updated
 * @wrb_cnt:  number of WRBs consumed (incl. header/dummy WRBs)
 * @copied:   bytes posted to the ring
 * @gso_segs: GSO segment count; 0 for a non-GSO skb (counted as 1 pkt)
 * @stopped:  true if this xmit stopped the queue (counted in tx_stops)
 *
 * All updates happen inside a u64_stats sync section so 64-bit counters
 * read consistently on 32-bit hosts.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 /* to account for hdr wrb */
685 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000702 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703}
704
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
/* Populate the header WRB that precedes a packet's fragment WRBs.
 * @adapter:      driver private state (chip type checks)
 * @hdr:          header WRB to fill (zeroed first)
 * @skb:          packet being transmitted
 * @wrb_cnt:      total WRBs used by this packet (incl. this one)
 * @len:          total payload bytes covered by the fragment WRBs
 * @skip_hw_vlan: request FW to skip HW VLAN insertion (evt=1, compl=0)
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: program MSS; lso6 only for non-Lancer IPv6 TSO */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW checksum offload for TCP or UDP */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
755
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000764 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000765 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000770 }
771}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772
/* DMA-map an skb and post its header + fragment WRBs to the TX ring.
 * @txq:          TX ring; its head is advanced past all WRBs written
 * @wrb_cnt:      total WRB count previously computed by wrb_cnt_for_skb()
 * @dummy_wrb:    append a zero-length padding WRB (even-count requirement)
 * @skip_hw_vlan: passed through to the header WRB
 *
 * Returns the number of payload bytes mapped, or 0 on DMA mapping
 * failure — in which case all mappings made so far are undone and the
 * ring head is restored, leaving the ring untouched.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot now; it is filled last, once the
	 * total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for rollback on error */
	map_head = txq->head;

	/* Map the linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					 skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Padding WRB to keep the per-packet WRB count even */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Walk the WRBs written so far and unmap them; only the first can
	 * be a dma_map_single() mapping, hence map_single is cleared after
	 * the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
839
/* Insert VLAN tag(s) into the packet data in software (driver-side
 * tagging, used to work around HW tagging bugs). May also add the outer
 * QnQ tag. Returns the (possibly reallocated) skb, or NULL if the skb
 * was consumed by a failed tag insertion.
 * @skip_hw_vlan: if non-NULL, set to true when FW must be told to skip
 *                its own VLAN insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* We will modify packet data; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* In QnQ mode with a port VLAN, untagged pkts get the pvid */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now inline; clear the out-of-band TCI so HW
		 * does not insert it again.
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
882
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
Sathya Perlaee9c7992013-05-22 23:04:55 +0000910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000914}
915
/* Apply chip-specific Tx workarounds to an skb before it is queued.
 * The skb may be padded, trimmed, or re-tagged (and reallocated in the
 * process). Returns the skb to transmit, or NULL if the packet was
 * dropped/consumed by a workaround.
 * @skip_hw_vlan: set to true when FW must skip HW VLAN insertion.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto err;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the skb back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
993
/* ndo_start_xmit handler: apply Tx workarounds, post WRBs, and ring the
 * doorbell. Always returns NETDEV_TX_OK; dropped packets are counted in
 * tx_drv_drops and freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	/* Remember the ring head so it can be restored on mapping failure */
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround path already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* mapping failed: make_tx_wrbs rolled back its WRBs; restore
		 * the head we saved and drop the packet
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1042
1043static int be_change_mtu(struct net_device *netdev, int new_mtu)
1044{
1045 struct be_adapter *adapter = netdev_priv(netdev);
1046 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001047 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1048 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001049 dev_info(&adapter->pdev->dev,
1050 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001051 BE_MIN_MTU,
1052 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053 return -EINVAL;
1054 }
1055 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1056 netdev->mtu, new_mtu);
1057 netdev->mtu = new_mtu;
1058 return 0;
1059}
1060
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * Pushes the driver's current VLAN table to HW. Falls back to (or out
 * of) VLAN-promiscuous mode as needed; BE_FLAGS_VLAN_PROMISC tracks the
 * current mode. Returns 0 on success or a command status on failure.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More VLANs than HW filters: must use VLAN promiscuous mode */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in VLAN promisc mode; nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1120
Patrick McHardy80d5c362013-04-19 02:04:28 +00001121static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001122{
1123 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001124 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001125
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001126 /* Packets with VID 0 are always received by Lancer by default */
1127 if (lancer_chip(adapter) && vid == 0)
1128 goto ret;
1129
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001130 adapter->vlan_tag[vid] = 1;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301131 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001132
Somnath Kotura6b74e02014-01-21 15:50:55 +05301133 status = be_vid_config(adapter);
1134 if (status) {
1135 adapter->vlans_added--;
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001136 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301137 }
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001138ret:
1139 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001140}
1141
Patrick McHardy80d5c362013-04-19 02:04:28 +00001142static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001143{
1144 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001145 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001147 /* Packets with VID 0 are always received by Lancer by default */
1148 if (lancer_chip(adapter) && vid == 0)
1149 goto ret;
1150
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 adapter->vlan_tag[vid] = 0;
Somnath Kotura6b74e02014-01-21 15:50:55 +05301152 status = be_vid_config(adapter);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001153 if (!status)
1154 adapter->vlans_added--;
1155 else
1156 adapter->vlan_tag[vid] = 1;
1157ret:
1158 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159}
1160
/* Leave promiscuous mode: clear the driver flags (including the VLAN
 * promisc flag) and tell HW to turn promiscuous filtering off.
 */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
1168
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * filters to match the netdev's current flags and address lists.
 * Falls back to promiscuous (or all-multi) modes when the HW filter
 * capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN table that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the unicast MAC filter list with HW */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously-programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many UC addresses for HW slots: go promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1228
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001229static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1230{
1231 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001232 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001233 int status;
1234
Sathya Perla11ac75e2011-12-13 00:58:50 +00001235 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001236 return -EPERM;
1237
Sathya Perla11ac75e2011-12-13 00:58:50 +00001238 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001239 return -EINVAL;
1240
Sathya Perla3175d8c2013-07-23 15:25:03 +05301241 if (BEx_chip(adapter)) {
1242 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1243 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001244
Sathya Perla11ac75e2011-12-13 00:58:50 +00001245 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1246 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301247 } else {
1248 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1249 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001250 }
1251
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001252 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001253 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1254 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001255 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001256 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001257
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001258 return status;
1259}
1260
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001261static int be_get_vf_config(struct net_device *netdev, int vf,
1262 struct ifla_vf_info *vi)
1263{
1264 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001265 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001266
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001268 return -EPERM;
1269
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001271 return -EINVAL;
1272
1273 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001274 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001275 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1276 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001277 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001278
1279 return 0;
1280}
1281
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001282static int be_set_vf_vlan(struct net_device *netdev,
1283 int vf, u16 vlan, u8 qos)
1284{
1285 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001286 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001287 int status = 0;
1288
Sathya Perla11ac75e2011-12-13 00:58:50 +00001289 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001290 return -EPERM;
1291
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001292 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001293 return -EINVAL;
1294
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001295 if (vlan || qos) {
1296 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301297 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001298 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1299 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001300 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001301 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301302 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1303 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001304 }
1305
Somnath Koturc5022242014-03-03 14:24:20 +05301306 if (!status)
1307 vf_cfg->vlan_tag = vlan;
1308 else
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001309 dev_info(&adapter->pdev->dev,
Somnath Koturc5022242014-03-03 14:24:20 +05301310 "VLAN %d config on VF %d failed\n", vlan, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001311 return status;
1312}
1313
Ajit Khapardee1d18732010-07-23 01:52:13 +00001314static int be_set_vf_tx_rate(struct net_device *netdev,
1315 int vf, int rate)
1316{
1317 struct be_adapter *adapter = netdev_priv(netdev);
1318 int status = 0;
1319
Sathya Perla11ac75e2011-12-13 00:58:50 +00001320 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001321 return -EPERM;
1322
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001323 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001324 return -EINVAL;
1325
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001326 if (rate < 100 || rate > 10000) {
1327 dev_err(&adapter->pdev->dev,
1328 "tx rate must be between 100 and 10000 Mbps\n");
1329 return -EINVAL;
1330 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001331
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001332 if (lancer_chip(adapter))
1333 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1334 else
1335 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001336
1337 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001338 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001339 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001340 else
1341 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001342 return status;
1343}
1344
Sathya Perla2632baf2013-10-01 16:00:00 +05301345static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1346 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001347{
Sathya Perla2632baf2013-10-01 16:00:00 +05301348 aic->rx_pkts_prev = rx_pkts;
1349 aic->tx_reqs_prev = tx_pkts;
1350 aic->jiffies = now;
1351}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001352
/* Adaptive interrupt coalescing: recompute the EQ delay for every event
 * queue from the rx+tx packet rate observed since the last run, and push
 * all changed delays to the firmware in one be_cmd_modify_eqd() call.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: fall back to the static (ethtool) eqd
			 * and invalidate the baseline timestamp */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the per-queue counters under the u64_stats seqcount */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets-per-second since the last snapshot */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Very low rates get no delay; clamp to the configured range */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a firmware update only when the delay changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1419
Sathya Perla3abcded2010-10-03 22:12:27 -07001420static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001421 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001422{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001423 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001424
Sathya Perlaab1594e2011-07-25 19:10:15 +00001425 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001426 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001427 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001428 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001429 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001430 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001431 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001432 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001433 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434}
1435
Sathya Perla2e588f82011-03-11 02:49:26 +00001436static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001437{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001438 /* L4 checksum is not reliable for non TCP/UDP packets.
1439 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001440 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1441 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001442}
1443
/* Consume the RX-queue entry at the tail and return its page_info.
 * If this fragment is the last user of the (possibly shared) big page,
 * the page's DMA mapping is torn down here. Advances the tail and
 * decrements the queue's used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Unmap the big page only once, when its last fragment is reaped */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1465
1466/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001467static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001471 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001473 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301474 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001475 put_page(page_info->page);
1476 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001477 }
1478}
1479
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied wholesale into the linear area
 * (tiny packets) or split: the Ethernet header goes into the linear
 * area and the payload is attached as a page frag. Remaining fragments
 * are appended as frags; consecutive fragments from the same physical
 * page are coalesced into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the rest of the first fragment rides as frag[0] */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment completion: we are done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1553
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted fragments, set checksum /
 * rx-queue / rss-hash / vlan metadata and hand it to the stack.
 * On skb allocation failure the completion's fragments are discarded
 * and a drop is counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when RXCSUM is on and the compl
	 * flags say it passed (see csum_passed()) */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1587
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach all fragments to the napi GRO skb (coalescing fragments from
 * the same physical page into one frag slot), set metadata and feed
 * the frame to napi_gro_frags(). HW has already validated the checksum
 * on this path, so CHECKSUM_UNNECESSARY is set unconditionally.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration's j++ lands on frag 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1643
/* Decode a v1-format (BE3-native) RX completion entry into the
 * driver's chip-independent be_rx_compl_info. The vlan fields are
 * extracted only when the vtp (vlan present) bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001673
/* Decode a v0-format (legacy) RX completion entry into the driver's
 * chip-independent be_rx_compl_info. Unlike the v1 parser, this one
 * also extracts the ip_frag bit (used by the caller to invalidate the
 * L4 checksum for IP fragments).
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1705
/* Fetch and parse the next valid RX completion from the completion
 * queue, or return NULL when none is pending. The parsed result lives
 * in rxo->rxcp; the HW entry's valid bit is cleared and the CQ tail is
 * advanced so the entry can be reused.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The L4 checksum is not meaningful for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the vlan is
		 * explicitly configured on the interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1748
Eric Dumazet1829b082011-03-01 05:48:12 +00001749static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001750{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001752
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001754 gfp |= __GFP_COMP;
1755 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001756}
1757
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * A "big page" is DMA-mapped once and carved into rx_frag_size chunks;
 * each chunk gets its own page reference. The fragment that is the last
 * user of a page is flagged (last_page_user) so get_rx_page_info() knows
 * when to unmap. Posts at most MAX_RX_POST descriptors, stopping early
 * at an already-occupied slot or on allocation/mapping failure.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_info->page_offset = 0;
		} else {
			/* Next fragment of the current big page: take an
			 * extra reference for it */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the fragment's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop ended mid-page: the last posted fragment owns the unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1828
Sathya Perla5fb379e2009-06-18 00:02:59 +00001829static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001830{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1832
1833 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1834 return NULL;
1835
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001836 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1838
1839 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1840
1841 queue_tail_inc(tx_cq);
1842 return txcp;
1843}
1844
/* Reclaim one completed TX request: walk the WRBs of the skb ending at
 * @last_index, DMA-unmap each fragment (the header mapping only for the
 * first data WRB) and free the skb.
 *
 * Returns the number of WRBs consumed, including the header WRB.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is tracked at the slot of its header WRB (current tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the linear (header) part only once, on the first WRB */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1876
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001877/* Return the number of events in the event queue */
1878static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001879{
1880 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001881 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001882
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001883 do {
1884 eqe = queue_tail_node(&eqo->q);
1885 if (eqe->evt == 0)
1886 break;
1887
1888 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001889 eqe->evt = 0;
1890 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001891 queue_tail_inc(&eqo->q);
1892 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001893
1894 return num;
1895}
1896
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001897/* Leaves the EQ is disarmed state */
1898static void be_eq_clean(struct be_eq_obj *eqo)
1899{
1900 int num = events_get(eqo);
1901
1902 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1903}
1904
/* Drains the RX completion queue of @rxo and frees every RX buffer still
 * posted on its RX queue. On BE chips this waits (up to ~10 polls with
 * 1ms delays) for the HW flush completion, identified by num_rcvd == 0;
 * Lancer does not emit one, so no wait is done there. The CQ is left
 * unarmed and the RX queue indices are reset on return.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1953
/* Waits (up to ~200ms, polling every 1ms) for pending TX completions on
 * all TX queues to arrive, reaping them as they come in. Afterwards, any
 * posted TX entries whose completions never arrived are forcibly freed,
 * so every TX queue is empty on return.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the reaped completions and credit
				 * the freed WRBs back to the queue.
				 */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2012
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002013static void be_evt_queues_destroy(struct be_adapter *adapter)
2014{
2015 struct be_eq_obj *eqo;
2016 int i;
2017
2018 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002019 if (eqo->q.created) {
2020 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002021 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302022 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302023 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002024 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002025 be_queue_free(adapter, &eqo->q);
2026 }
2027}
2028
2029static int be_evt_queues_create(struct be_adapter *adapter)
2030{
2031 struct be_queue_info *eq;
2032 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302033 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002034 int i, rc;
2035
Sathya Perla92bf14a2013-08-27 16:57:32 +05302036 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2037 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002038
2039 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302040 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2041 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302042 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302043 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002044 eqo->adapter = adapter;
2045 eqo->tx_budget = BE_TX_BUDGET;
2046 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302047 aic->max_eqd = BE_MAX_EQD;
2048 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002049
2050 eq = &eqo->q;
2051 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2052 sizeof(struct be_eq_entry));
2053 if (rc)
2054 return rc;
2055
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302056 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002057 if (rc)
2058 return rc;
2059 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002060 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002061}
2062
Sathya Perla5fb379e2009-06-18 00:02:59 +00002063static void be_mcc_queues_destroy(struct be_adapter *adapter)
2064{
2065 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002066
Sathya Perla8788fdc2009-07-27 22:52:03 +00002067 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002068 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002069 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002070 be_queue_free(adapter, q);
2071
Sathya Perla8788fdc2009-07-27 22:52:03 +00002072 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002073 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002074 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002075 be_queue_free(adapter, q);
2076}
2077
2078/* Must be called only after TX qs are created as MCC shares TX EQ */
2079static int be_mcc_queues_create(struct be_adapter *adapter)
2080{
2081 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002082
Sathya Perla8788fdc2009-07-27 22:52:03 +00002083 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002084 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00002085 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002086 goto err;
2087
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002088 /* Use the default EQ for MCC completions */
2089 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002090 goto mcc_cq_free;
2091
Sathya Perla8788fdc2009-07-27 22:52:03 +00002092 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002093 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2094 goto mcc_cq_destroy;
2095
Sathya Perla8788fdc2009-07-27 22:52:03 +00002096 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002097 goto mcc_q_free;
2098
2099 return 0;
2100
2101mcc_q_free:
2102 be_queue_free(adapter, q);
2103mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002104 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002105mcc_cq_free:
2106 be_queue_free(adapter, cq);
2107err:
2108 return -1;
2109}
2110
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002111static void be_tx_queues_destroy(struct be_adapter *adapter)
2112{
2113 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002114 struct be_tx_obj *txo;
2115 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002116
Sathya Perla3c8def92011-06-12 20:01:58 +00002117 for_all_tx_queues(adapter, txo, i) {
2118 q = &txo->q;
2119 if (q->created)
2120 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2121 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122
Sathya Perla3c8def92011-06-12 20:01:58 +00002123 q = &txo->cq;
2124 if (q->created)
2125 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2126 be_queue_free(adapter, q);
2127 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002128}
2129
/* Allocates and creates the TX completion queues and TX queues: one TXQ
 * per event queue, capped by the adapter's max TXQs. Each TX CQ is bound
 * to an EQ; with fewer EQs than TXQs, several TXQs share one EQ.
 * Returns 0 on success or the first failing status.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2170
2171static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002172{
2173 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002174 struct be_rx_obj *rxo;
2175 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176
Sathya Perla3abcded2010-10-03 22:12:27 -07002177 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 q = &rxo->cq;
2179 if (q->created)
2180 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2181 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183}
2184
/* Allocates and creates the RX completion queues: one RSS ring per EQ,
 * plus one extra default (non-RSS) RXQ when at least two RSS rings are
 * possible. Each CQ is bound round-robin to an EQ.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs are distributed across the available EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2221
/* INTx interrupt handler: count pending events, schedule NAPI and
 * notify/clear the EQ. Returns IRQ_NONE for repeated spurious interrupts
 * so the kernel can detect a misbehaving line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2253
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002254static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002256 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257
Sathya Perla0b545a62012-11-23 00:27:18 +00002258 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2259 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260 return IRQ_HANDLED;
2261}
2262
Sathya Perla2e588f82011-03-11 02:49:26 +00002263static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002264{
Somnath Koture38b1702013-05-29 22:55:56 +00002265 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002266}
2267
/* Consume up to @budget RX completions from @rxo, delivering packets to
 * the stack -- via GRO when eligible and not busy-polling (@polling !=
 * BUSY_POLLING). Flush completions, partial-DMA completions and
 * wrong-port packets are discarded. Notifies the CQ for the work done
 * and replenishes RX buffers when they run low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2323
/* Reap up to @budget TX completions on @txo, freeing the transmitted
 * skbs and crediting the freed WRBs back to the TX queue. Wakes netdev
 * sub-queue @idx if it was stopped and the queue is now under half full.
 * Returns true when fewer than @budget completions were found, i.e. TX
 * work is done for this poll.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002356
/* NAPI poll handler for an EQ: reaps TX completions on every TXQ served
 * by this EQ, processes RX on its rings (unless busy_poll currently owns
 * them), services MCC completions on the MCC EQ, and completes NAPI --
 * re-arming the EQ -- only when all work fit within @budget.
 * Returns the amount of work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy_poll owns the rings; report budget exhausted so
		 * NAPI polls again instead of re-arming the EQ.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2401
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll handler: poll each RX ring on this EQ once, stopping at the
 * first ring that yields packets. Returns the packet count, or
 * LL_FLUSH_BUSY when NAPI currently owns the EQ.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, pkts = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		pkts = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (pkts)
			break;
	}

	be_unlock_busy_poll(eqo);
	return pkts;
}
#endif
2423
/* Reads the error status registers -- SLIPORT registers on Lancer,
 * UE (unrecoverable error) PCI config words on BE chips -- and logs any
 * error bits found. A SLIPORT error sets adapter->hw_error; UE bits on
 * BE chips do not, as some platforms report spurious UEs.
 * No-op if a HW error was already detected.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if its a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
2499
Sathya Perla8d56ff12009-11-22 22:02:26 +00002500static void be_msix_disable(struct be_adapter *adapter)
2501{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002502 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002503 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002504 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302505 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002506 }
2507}
2508
/* Enable MSI-x vectors: 2 * max-EQs (capped by 2 * online CPUs) when
 * RoCE is supported -- half for NIC, half for RoCE -- else just
 * cfg_num_qs. If the full request fails, retries with the vector count
 * pci_enable_msix() reported as available (its positive return value).
 * Returns 0 on success; on failure, 0 for PFs (INTx fallback possible)
 * and the error for VFs, which do not support INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the HW can grant */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2557
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002558static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002559 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002560{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302561 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002562}
2563
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On any request_irq() failure, frees the IRQs already acquired (in
 * reverse), disables MSI-x, and returns the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = be_msix, 0, eqo->desc, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: i currently indexes the EQ that failed; free the IRQs of
	 * all EQs before it, walking backwards.
	 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2587
/* Register the adapter's interrupt handler(s).
 * Prefers MSI-x when enabled; on a PF, falls back to a shared INTx line
 * (serviced via the first EQ only). VFs have no INTx support, so an
 * MSI-x registration failure is fatal for them.
 * Sets adapter->isr_registered on success; returns 0 or a -errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2615
2616static void be_irq_unregister(struct be_adapter *adapter)
2617{
2618 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002619 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002620 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002621
2622 if (!adapter->isr_registered)
2623 return;
2624
2625 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002626 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002627 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002628 goto done;
2629 }
2630
2631 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002632 for_all_evt_queues(adapter, eqo, i)
2633 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002634
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002635done:
2636 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002637}
2638
/* Destroy all RX queues: issue the FW destroy command, drain any
 * completions left on the RX CQ, then free the queue memory.
 * The FW destroy must precede the CQ clean so no new completions arrive
 * while draining; be_queue_free() is safe on never-created queues.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2654
/* ndo_stop handler: quiesce the interface in strict order —
 * RoCE close, NAPI/busy-poll off, async MCC off, TX drain, RX queue
 * teardown, extra uc-mac removal, per-EQ IRQ sync + EQ clean, and
 * finally IRQ unregistration. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the additional uc-macs; index 0 (the primary MAC) stays */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no in-flight interrupt handler still references an EQ
	 * before cleaning it.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2698
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002699static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002700{
2701 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002702 int rc, i, j;
2703 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002704
2705 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002706 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2707 sizeof(struct be_eth_rx_d));
2708 if (rc)
2709 return rc;
2710 }
2711
2712 /* The FW would like the default RXQ to be created first */
2713 rxo = default_rxo(adapter);
2714 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2715 adapter->if_handle, false, &rxo->rss_id);
2716 if (rc)
2717 return rc;
2718
2719 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002720 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002721 rx_frag_size, adapter->if_handle,
2722 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002723 if (rc)
2724 return rc;
2725 }
2726
2727 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002728 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2729 for_all_rss_queues(adapter, rxo, i) {
2730 if ((j + i) >= 128)
2731 break;
2732 rsstable[j + i] = rxo->rss_id;
2733 }
2734 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002735 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2736 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2737
2738 if (!BEx_chip(adapter))
2739 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2740 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302741 } else {
2742 /* Disable RSS, if only default RX Q is created */
2743 adapter->rss_flags = RSS_ENABLE_NONE;
2744 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002745
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302746 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2747 128);
2748 if (rc) {
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002751 }
2752
2753 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002754 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002755 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002756 return 0;
2757}
2758
/* ndo_open handler: bring the interface up —
 * create RX queues, register IRQs, arm RX/TX CQs, enable async MCC and
 * NAPI/busy-poll, arm EQs, report link state, start the TX queues, and
 * notify RoCE. On any failure the partial bring-up is torn down via
 * be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: a link-status query failure is not fatal to open */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	/* NOTE(review): the specific setup error in 'status' is discarded
	 * here and replaced by -EIO.
	 */
	return -EIO;
}
2802
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002803static int be_setup_wol(struct be_adapter *adapter, bool enable)
2804{
2805 struct be_dma_mem cmd;
2806 int status = 0;
2807 u8 mac[ETH_ALEN];
2808
2809 memset(mac, 0, ETH_ALEN);
2810
2811 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002812 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2813 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002814 if (cmd.va == NULL)
2815 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002816
2817 if (enable) {
2818 status = pci_write_config_dword(adapter->pdev,
2819 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2820 if (status) {
2821 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002822 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002823 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2824 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002825 return status;
2826 }
2827 status = be_cmd_enable_magic_wol(adapter,
2828 adapter->netdev->dev_addr, &cmd);
2829 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2830 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2831 } else {
2832 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2833 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2834 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2835 }
2836
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002837 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002838 return status;
2839}
2840
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last per-VF programming command; failures
 * for individual VFs are logged but do not stop the loop.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 program a pmac entry; newer chips set the MAC
		 * directly on the VF's interface.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address (u8 wraps) */
		mac[5] += 1;
	}
	return status;
}
2875
/* Query the currently-active MAC of each VF from FW and cache it in the
 * VF's vf_cfg. Used when VFs were already enabled before this probe, so
 * their MACs must be read back rather than (re)assigned.
 * Returns 0 or the first failing query's status.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2892
/* Tear down SR-IOV state: disable SR-IOV, undo each VF's MAC programming,
 * destroy the VF interfaces, and free the vf_cfg array.
 * If any VF is still assigned to a VM, SR-IOV and the per-VF FW objects
 * are left intact (only the host-side bookkeeping is freed).
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Mirror of the MAC programming in be_vf_eth_addr_config() */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2920
/* Destroy all adapter queues. The order is deliberate: MCC first, then
 * RX CQs, TX queues, and the event queues last (the EQs service the
 * other queues' completions).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2928
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302929static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002930{
Sathya Perla191eb752012-02-23 18:50:13 +00002931 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2932 cancel_delayed_work_sync(&adapter->work);
2933 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2934 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302935}
2936
Somnath Koturb05004a2013-12-05 12:08:16 +05302937static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302938{
2939 int i;
2940
Somnath Koturb05004a2013-12-05 12:08:16 +05302941 if (adapter->pmac_id) {
2942 for (i = 0; i < (adapter->uc_macs + 1); i++)
2943 be_cmd_pmac_del(adapter, adapter->if_handle,
2944 adapter->pmac_id[i], 0);
2945 adapter->uc_macs = 0;
2946
2947 kfree(adapter->pmac_id);
2948 adapter->pmac_id = NULL;
2949 }
2950}
2951
/* Full adapter teardown, in reverse order of setup: stop the worker,
 * clear SR-IOV state, delete programmed MACs, destroy the interface,
 * destroy all queues, and finally disable MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2969
/* Create a FW interface for each VF with untagged/broadcast/multicast
 * capabilities. On non-BE3 chips the capability flags are refined from
 * the VF's FW profile when one exists (a failed profile query silently
 * keeps the defaults). Returns 0 or the first if-create failure.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
2999
/* Allocate the per-VF config array and mark each VF's if_handle and
 * pmac_id as invalid (-1) until they are actually created/programmed.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3016
/* Configure SR-IOV virtual functions.
 * If VFs were already enabled (e.g. by a previous driver instance), their
 * existing if-handles and MACs are queried from FW; otherwise interfaces
 * are created, MACs assigned, and pci_enable_sriov() is called last.
 * Also grants FILTMGMT privilege, lifts the BE3 default VF TX-rate cap,
 * caches the link speed as the VF tx_rate, and enables each new VF.
 * On failure all VF state is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already exist: reuse them, ignoring the module param */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV only after all per-VF FW state is in place */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3108
/* On BE2/BE3 FW does not suggest the supported limits, so derive them
 * in the driver from chip type, function mode/caps, SR-IOV intent and
 * port count, and fill @res (caller passes a zeroed struct; fields not
 * assigned here — e.g. max_rss_qs when RSS is unavailable — stay 0).
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* VLAN table is shared in Flex10 mode; UMC reserves entries too */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	/* Fewer EQs are available per PF when SR-IOV may be enabled */
	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3160
Sathya Perla30128032011-11-10 19:17:57 +00003161static void be_setup_init(struct be_adapter *adapter)
3162{
3163 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003164 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003165 adapter->if_handle = -1;
3166 adapter->be3_native = false;
3167 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003168 if (be_physfn(adapter))
3169 adapter->cmd_privileges = MAX_PRIVILEGES;
3170 else
3171 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003172}
3173
/* Populate adapter->res with per-function resource limits.
 * BE2/BE3 limits are derived in the driver (BEx_get_resources); newer
 * chips query FW and, on a PF, overlay max_vfs from the profile config.
 * Returns 0 or a FW-command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3217
/* Routine to query per function resource limits.
 * Reads the FW config (port number, function mode/caps, ASIC rev),
 * reports the active profile on a PF (informational only — a failure is
 * ignored), fetches resource limits, allocates the pmac_id table, and
 * clamps cfg_num_qs to what HW supports.
 * Returns 0, a FW-command error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3253
/* Establish the primary MAC address.
 * If the netdev has no address yet, read the permanent MAC from FW and
 * set dev_addr/perm_addr; otherwise re-program the existing dev_addr
 * (the HW may have been reset). The pmac-add is skipped on BE3-R VFs,
 * whose initial MAC is programmed by the PF.
 * Returns 0 or the FW query error.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3277
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303278static void be_schedule_worker(struct be_adapter *adapter)
3279{
3280 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3281 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3282}
3283
Sathya Perla77071332013-08-27 16:57:34 +05303284static int be_setup_queues(struct be_adapter *adapter)
3285{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303286 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303287 int status;
3288
3289 status = be_evt_queues_create(adapter);
3290 if (status)
3291 goto err;
3292
3293 status = be_tx_qs_create(adapter);
3294 if (status)
3295 goto err;
3296
3297 status = be_rx_cqs_create(adapter);
3298 if (status)
3299 goto err;
3300
3301 status = be_mcc_queues_create(adapter);
3302 if (status)
3303 goto err;
3304
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303305 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3306 if (status)
3307 goto err;
3308
3309 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3310 if (status)
3311 goto err;
3312
Sathya Perla77071332013-08-27 16:57:34 +05303313 return 0;
3314err:
3315 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3316 return status;
3317}
3318
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303319int be_update_queues(struct be_adapter *adapter)
3320{
3321 struct net_device *netdev = adapter->netdev;
3322 int status;
3323
3324 if (netif_running(netdev))
3325 be_close(netdev);
3326
3327 be_cancel_worker(adapter);
3328
3329 /* If any vectors have been shared with RoCE we cannot re-program
3330 * the MSIx table.
3331 */
3332 if (!adapter->num_msix_roce_vec)
3333 be_msix_disable(adapter);
3334
3335 be_clear_queues(adapter);
3336
3337 if (!msix_enabled(adapter)) {
3338 status = be_msix_enable(adapter);
3339 if (status)
3340 return status;
3341 }
3342
3343 status = be_setup_queues(adapter);
3344 if (status)
3345 return status;
3346
3347 be_schedule_worker(adapter);
3348
3349 if (netif_running(netdev))
3350 status = be_open(netdev);
3351
3352 return status;
3353}
3354
Sathya Perla5fb379e2009-06-18 00:02:59 +00003355static int be_setup(struct be_adapter *adapter)
3356{
Sathya Perla39f1d942012-05-08 19:41:24 +00003357 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303358 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003359 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003360
Sathya Perla30128032011-11-10 19:17:57 +00003361 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003362
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003363 if (!lancer_chip(adapter))
3364 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003365
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003366 status = be_get_config(adapter);
3367 if (status)
3368 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003369
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003370 status = be_msix_enable(adapter);
3371 if (status)
3372 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003373
Sathya Perla77071332013-08-27 16:57:34 +05303374 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3375 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3376 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3377 en_flags |= BE_IF_FLAGS_RSS;
3378 en_flags = en_flags & be_if_cap_flags(adapter);
3379 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3380 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003381 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003382 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003383
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303384 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3385 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303386 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303387 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003388 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003389 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003390
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003391 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003392
Sathya Perla95046b92013-07-23 15:25:02 +05303393 status = be_mac_setup(adapter);
3394 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003395 goto err;
3396
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003397 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003398
Somnath Koture9e2a902013-10-24 14:37:53 +05303399 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3400 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3401 adapter->fw_ver);
3402 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3403 }
3404
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003405 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003406 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003407
3408 be_set_rx_mode(adapter->netdev);
3409
Suresh Reddy76a9e082014-01-15 13:23:40 +05303410 be_cmd_get_acpi_wol_cap(adapter);
3411
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003412 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003413
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003414 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3415 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003416 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003417
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303418 if (sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303419 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003420 be_vf_setup(adapter);
3421 else
3422 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003423 }
3424
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003425 status = be_cmd_get_phy_info(adapter);
3426 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003427 adapter->phy.fc_autoneg = 1;
3428
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303429 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003430 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003431err:
3432 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003433 return status;
3434}
3435
Ivan Vecera66268732011-12-08 01:31:21 +00003436#ifdef CONFIG_NET_POLL_CONTROLLER
3437static void be_netpoll(struct net_device *netdev)
3438{
3439 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003440 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003441 int i;
3442
Sathya Perlae49cc342012-11-27 19:50:02 +00003443 for_all_evt_queues(adapter, eqo, i) {
3444 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3445 napi_schedule(&eqo->napi);
3446 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003447
3448 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003449}
3450#endif
3451
Ajit Khaparde84517482009-09-04 03:12:16 +00003452#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003453static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003454
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003455static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003456 const u8 *p, u32 img_start, int image_size,
3457 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003458{
3459 u32 crc_offset;
3460 u8 flashed_crc[4];
3461 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003462
3463 crc_offset = hdr_size + img_start + image_size - 4;
3464
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003465 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003466
3467 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003468 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003469 if (status) {
3470 dev_err(&adapter->pdev->dev,
3471 "could not get crc from flash, not flashing redboot\n");
3472 return false;
3473 }
3474
3475 /*update redboot only if crc does not match*/
3476 if (!memcmp(flashed_crc, p, 4))
3477 return false;
3478 else
3479 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003480}
3481
Sathya Perla306f1342011-08-02 19:57:45 +00003482static bool phy_flashing_required(struct be_adapter *adapter)
3483{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003484 return (adapter->phy.phy_type == TN_8022 &&
3485 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003486}
3487
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003488static bool is_comp_in_ufi(struct be_adapter *adapter,
3489 struct flash_section_info *fsec, int type)
3490{
3491 int i = 0, img_type = 0;
3492 struct flash_section_info_g2 *fsec_g2 = NULL;
3493
Sathya Perlaca34fe32012-11-06 17:48:56 +00003494 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003495 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3496
3497 for (i = 0; i < MAX_FLASH_COMP; i++) {
3498 if (fsec_g2)
3499 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3500 else
3501 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3502
3503 if (img_type == type)
3504 return true;
3505 }
3506 return false;
3507
3508}
3509
Jingoo Han4188e7d2013-08-05 18:02:02 +09003510static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003511 int header_size,
3512 const struct firmware *fw)
3513{
3514 struct flash_section_info *fsec = NULL;
3515 const u8 *p = fw->data;
3516
3517 p += header_size;
3518 while (p < (fw->data + fw->size)) {
3519 fsec = (struct flash_section_info *)p;
3520 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3521 return fsec;
3522 p += 32;
3523 }
3524 return NULL;
3525}
3526
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003527static int be_flash(struct be_adapter *adapter, const u8 *img,
3528 struct be_dma_mem *flash_cmd, int optype, int img_size)
3529{
3530 u32 total_bytes = 0, flash_op, num_bytes = 0;
3531 int status = 0;
3532 struct be_cmd_write_flashrom *req = flash_cmd->va;
3533
3534 total_bytes = img_size;
3535 while (total_bytes) {
3536 num_bytes = min_t(u32, 32*1024, total_bytes);
3537
3538 total_bytes -= num_bytes;
3539
3540 if (!total_bytes) {
3541 if (optype == OPTYPE_PHY_FW)
3542 flash_op = FLASHROM_OPER_PHY_FLASH;
3543 else
3544 flash_op = FLASHROM_OPER_FLASH;
3545 } else {
3546 if (optype == OPTYPE_PHY_FW)
3547 flash_op = FLASHROM_OPER_PHY_SAVE;
3548 else
3549 flash_op = FLASHROM_OPER_SAVE;
3550 }
3551
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003552 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003553 img += num_bytes;
3554 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3555 flash_op, num_bytes);
3556 if (status) {
3557 if (status == ILLEGAL_IOCTL_REQ &&
3558 optype == OPTYPE_PHY_FW)
3559 break;
3560 dev_err(&adapter->pdev->dev,
3561 "cmd to write to flash rom failed.\n");
3562 return status;
3563 }
3564 }
3565 return 0;
3566}
3567
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003568/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003569static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003570 const struct firmware *fw,
3571 struct be_dma_mem *flash_cmd,
3572 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003573
Ajit Khaparde84517482009-09-04 03:12:16 +00003574{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003575 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003576 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003577 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003578 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003579 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003580 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003581
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003582 struct flash_comp gen3_flash_types[] = {
3583 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3584 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3585 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3586 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3587 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3588 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3589 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3590 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3591 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3592 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3593 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3594 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3595 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3596 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3597 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3598 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3599 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3600 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3601 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3602 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003603 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003604
3605 struct flash_comp gen2_flash_types[] = {
3606 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3607 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3608 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3609 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3610 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3611 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3612 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3613 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3614 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3615 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3616 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3617 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3618 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3619 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3620 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3621 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003622 };
3623
Sathya Perlaca34fe32012-11-06 17:48:56 +00003624 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003625 pflashcomp = gen3_flash_types;
3626 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003627 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003628 } else {
3629 pflashcomp = gen2_flash_types;
3630 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003631 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003632 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003633
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003634 /* Get flash section info*/
3635 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3636 if (!fsec) {
3637 dev_err(&adapter->pdev->dev,
3638 "Invalid Cookie. UFI corrupted ?\n");
3639 return -1;
3640 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003641 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003642 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003643 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003644
3645 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3646 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3647 continue;
3648
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003649 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3650 !phy_flashing_required(adapter))
3651 continue;
3652
3653 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3654 redboot = be_flash_redboot(adapter, fw->data,
3655 pflashcomp[i].offset, pflashcomp[i].size,
3656 filehdr_size + img_hdrs_size);
3657 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003658 continue;
3659 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003660
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003661 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003662 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003663 if (p + pflashcomp[i].size > fw->data + fw->size)
3664 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003665
3666 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3667 pflashcomp[i].size);
3668 if (status) {
3669 dev_err(&adapter->pdev->dev,
3670 "Flashing section type %d failed.\n",
3671 pflashcomp[i].img_type);
3672 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003673 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003674 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003675 return 0;
3676}
3677
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003678static int be_flash_skyhawk(struct be_adapter *adapter,
3679 const struct firmware *fw,
3680 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003681{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003682 int status = 0, i, filehdr_size = 0;
3683 int img_offset, img_size, img_optype, redboot;
3684 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3685 const u8 *p = fw->data;
3686 struct flash_section_info *fsec = NULL;
3687
3688 filehdr_size = sizeof(struct flash_file_hdr_g3);
3689 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3690 if (!fsec) {
3691 dev_err(&adapter->pdev->dev,
3692 "Invalid Cookie. UFI corrupted ?\n");
3693 return -1;
3694 }
3695
3696 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3697 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3698 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3699
3700 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3701 case IMAGE_FIRMWARE_iSCSI:
3702 img_optype = OPTYPE_ISCSI_ACTIVE;
3703 break;
3704 case IMAGE_BOOT_CODE:
3705 img_optype = OPTYPE_REDBOOT;
3706 break;
3707 case IMAGE_OPTION_ROM_ISCSI:
3708 img_optype = OPTYPE_BIOS;
3709 break;
3710 case IMAGE_OPTION_ROM_PXE:
3711 img_optype = OPTYPE_PXE_BIOS;
3712 break;
3713 case IMAGE_OPTION_ROM_FCoE:
3714 img_optype = OPTYPE_FCOE_BIOS;
3715 break;
3716 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3717 img_optype = OPTYPE_ISCSI_BACKUP;
3718 break;
3719 case IMAGE_NCSI:
3720 img_optype = OPTYPE_NCSI_FW;
3721 break;
3722 default:
3723 continue;
3724 }
3725
3726 if (img_optype == OPTYPE_REDBOOT) {
3727 redboot = be_flash_redboot(adapter, fw->data,
3728 img_offset, img_size,
3729 filehdr_size + img_hdrs_size);
3730 if (!redboot)
3731 continue;
3732 }
3733
3734 p = fw->data;
3735 p += filehdr_size + img_offset + img_hdrs_size;
3736 if (p + img_size > fw->data + fw->size)
3737 return -1;
3738
3739 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3740 if (status) {
3741 dev_err(&adapter->pdev->dev,
3742 "Flashing section type %d failed.\n",
3743 fsec->fsec_entry[i].type);
3744 return status;
3745 }
3746 }
3747 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003748}
3749
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003750static int lancer_fw_download(struct be_adapter *adapter,
3751 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003752{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003753#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3754#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3755 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003756 const u8 *data_ptr = NULL;
3757 u8 *dest_image_ptr = NULL;
3758 size_t image_size = 0;
3759 u32 chunk_size = 0;
3760 u32 data_written = 0;
3761 u32 offset = 0;
3762 int status = 0;
3763 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003764 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003765
3766 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3767 dev_err(&adapter->pdev->dev,
3768 "FW Image not properly aligned. "
3769 "Length must be 4 byte aligned.\n");
3770 status = -EINVAL;
3771 goto lancer_fw_exit;
3772 }
3773
3774 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3775 + LANCER_FW_DOWNLOAD_CHUNK;
3776 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003777 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003778 if (!flash_cmd.va) {
3779 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003780 goto lancer_fw_exit;
3781 }
3782
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003783 dest_image_ptr = flash_cmd.va +
3784 sizeof(struct lancer_cmd_req_write_object);
3785 image_size = fw->size;
3786 data_ptr = fw->data;
3787
3788 while (image_size) {
3789 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3790
3791 /* Copy the image chunk content. */
3792 memcpy(dest_image_ptr, data_ptr, chunk_size);
3793
3794 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003795 chunk_size, offset,
3796 LANCER_FW_DOWNLOAD_LOCATION,
3797 &data_written, &change_status,
3798 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003799 if (status)
3800 break;
3801
3802 offset += data_written;
3803 data_ptr += data_written;
3804 image_size -= data_written;
3805 }
3806
3807 if (!status) {
3808 /* Commit the FW written */
3809 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003810 0, offset,
3811 LANCER_FW_DOWNLOAD_LOCATION,
3812 &data_written, &change_status,
3813 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003814 }
3815
3816 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3817 flash_cmd.dma);
3818 if (status) {
3819 dev_err(&adapter->pdev->dev,
3820 "Firmware load error. "
3821 "Status code: 0x%x Additional Status: 0x%x\n",
3822 status, add_status);
3823 goto lancer_fw_exit;
3824 }
3825
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003826 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur4bebb562013-12-05 12:07:55 +05303827 dev_info(&adapter->pdev->dev,
3828 "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00003829 status = lancer_physdev_ctrl(adapter,
3830 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003831 if (status) {
3832 dev_err(&adapter->pdev->dev,
3833 "Adapter busy for FW reset.\n"
3834 "New FW will not be active.\n");
3835 goto lancer_fw_exit;
3836 }
3837 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3838 dev_err(&adapter->pdev->dev,
3839 "System reboot required for new FW"
3840 " to be active\n");
3841 }
3842
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003843 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3844lancer_fw_exit:
3845 return status;
3846}
3847
Sathya Perlaca34fe32012-11-06 17:48:56 +00003848#define UFI_TYPE2 2
3849#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003850#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003851#define UFI_TYPE4 4
3852static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003853 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003854{
3855 if (fhdr == NULL)
3856 goto be_get_ufi_exit;
3857
Sathya Perlaca34fe32012-11-06 17:48:56 +00003858 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3859 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003860 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3861 if (fhdr->asic_type_rev == 0x10)
3862 return UFI_TYPE3R;
3863 else
3864 return UFI_TYPE3;
3865 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003866 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003867
3868be_get_ufi_exit:
3869 dev_err(&adapter->pdev->dev,
3870 "UFI and Interface are not compatible for flashing\n");
3871 return -1;
3872}
3873
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003874static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3875{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003876 struct flash_file_hdr_g3 *fhdr3;
3877 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003878 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003879 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003880 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003881
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003882 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003883 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3884 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003885 if (!flash_cmd.va) {
3886 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003887 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003888 }
3889
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003890 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003891 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003892
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003893 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003894
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003895 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3896 for (i = 0; i < num_imgs; i++) {
3897 img_hdr_ptr = (struct image_hdr *)(fw->data +
3898 (sizeof(struct flash_file_hdr_g3) +
3899 i * sizeof(struct image_hdr)));
3900 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003901 switch (ufi_type) {
3902 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003903 status = be_flash_skyhawk(adapter, fw,
3904 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003905 break;
3906 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003907 status = be_flash_BEx(adapter, fw, &flash_cmd,
3908 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003909 break;
3910 case UFI_TYPE3:
3911 /* Do not flash this ufi on BE3-R cards */
3912 if (adapter->asic_rev < 0x10)
3913 status = be_flash_BEx(adapter, fw,
3914 &flash_cmd,
3915 num_imgs);
3916 else {
3917 status = -1;
3918 dev_err(&adapter->pdev->dev,
3919 "Can't load BE3 UFI on BE3R\n");
3920 }
3921 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003922 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003923 }
3924
Sathya Perlaca34fe32012-11-06 17:48:56 +00003925 if (ufi_type == UFI_TYPE2)
3926 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003927 else if (ufi_type == -1)
3928 status = -1;
3929
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003930 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3931 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003932 if (status) {
3933 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003934 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003935 }
3936
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003937 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003938
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003939be_fw_exit:
3940 return status;
3941}
3942
3943int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3944{
3945 const struct firmware *fw;
3946 int status;
3947
3948 if (!netif_running(adapter->netdev)) {
3949 dev_err(&adapter->pdev->dev,
3950 "Firmware load not allowed (interface is down)\n");
3951 return -1;
3952 }
3953
3954 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3955 if (status)
3956 goto fw_exit;
3957
3958 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3959
3960 if (lancer_chip(adapter))
3961 status = lancer_fw_download(adapter, fw);
3962 else
3963 status = be_fw_download(adapter, fw);
3964
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003965 if (!status)
3966 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3967 adapter->fw_on_flash);
3968
Ajit Khaparde84517482009-09-04 03:12:16 +00003969fw_exit:
3970 release_firmware(fw);
3971 return status;
3972}
3973
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003974static int be_ndo_bridge_setlink(struct net_device *dev,
3975 struct nlmsghdr *nlh)
3976{
3977 struct be_adapter *adapter = netdev_priv(dev);
3978 struct nlattr *attr, *br_spec;
3979 int rem;
3980 int status = 0;
3981 u16 mode = 0;
3982
3983 if (!sriov_enabled(adapter))
3984 return -EOPNOTSUPP;
3985
3986 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3987
3988 nla_for_each_nested(attr, br_spec, rem) {
3989 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3990 continue;
3991
3992 mode = nla_get_u16(attr);
3993 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3994 return -EINVAL;
3995
3996 status = be_cmd_set_hsw_config(adapter, 0, 0,
3997 adapter->if_handle,
3998 mode == BRIDGE_MODE_VEPA ?
3999 PORT_FWD_TYPE_VEPA :
4000 PORT_FWD_TYPE_VEB);
4001 if (status)
4002 goto err;
4003
4004 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4005 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4006
4007 return status;
4008 }
4009err:
4010 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4011 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4012
4013 return status;
4014}
4015
4016static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4017 struct net_device *dev,
4018 u32 filter_mask)
4019{
4020 struct be_adapter *adapter = netdev_priv(dev);
4021 int status = 0;
4022 u8 hsw_mode;
4023
4024 if (!sriov_enabled(adapter))
4025 return 0;
4026
4027 /* BE and Lancer chips support VEB mode only */
4028 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4029 hsw_mode = PORT_FWD_TYPE_VEB;
4030 } else {
4031 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4032 adapter->if_handle, &hsw_mode);
4033 if (status)
4034 return 0;
4035 }
4036
4037 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4038 hsw_mode == PORT_FWD_TYPE_VEPA ?
4039 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4040}
4041
stephen hemmingere5686ad2012-01-05 19:10:25 +00004042static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004043 .ndo_open = be_open,
4044 .ndo_stop = be_close,
4045 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004046 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004047 .ndo_set_mac_address = be_mac_addr_set,
4048 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004049 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004050 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004051 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4052 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004053 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004054 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00004055 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004056 .ndo_get_vf_config = be_get_vf_config,
4057#ifdef CONFIG_NET_POLL_CONTROLLER
4058 .ndo_poll_controller = be_netpoll,
4059#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004060 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4061 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304062#ifdef CONFIG_NET_RX_BUSY_POLL
4063 .ndo_busy_poll = be_busy_poll
4064#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004065};
4066
/* One-time initialization of the net_device: advertise offload feature
 * flags, set GSO limits and hook up the netdev/ethtool ops.  Called from
 * be_probe() before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads: SG, TSO, checksum, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN rx strip/filter are always-on (not in hw_features) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Keep GSO output within the max frame the HW can transmit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4093
4094static void be_unmap_pci_bars(struct be_adapter *adapter)
4095{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004096 if (adapter->csr)
4097 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004098 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004099 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004100}
4101
/* Return the PCI BAR index that holds the doorbell registers:
 * BAR 0 on Lancer and on virtual functions, BAR 4 on BE physical
 * functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4109
4110static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004111{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004112 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004113 adapter->roce_db.size = 4096;
4114 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4115 db_bar(adapter));
4116 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4117 db_bar(adapter));
4118 }
Parav Pandit045508a2012-03-26 14:27:13 +00004119 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004120}
4121
4122static int be_map_pci_bars(struct be_adapter *adapter)
4123{
4124 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004125
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004126 if (BEx_chip(adapter) && be_physfn(adapter)) {
4127 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4128 if (adapter->csr == NULL)
4129 return -ENOMEM;
4130 }
4131
Sathya Perlace66f782012-11-06 17:48:58 +00004132 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004133 if (addr == NULL)
4134 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004135 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004136
4137 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004138 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004139
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004140pci_map_err:
4141 be_unmap_pci_bars(adapter);
4142 return -ENOMEM;
4143}
4144
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004145static void be_ctrl_cleanup(struct be_adapter *adapter)
4146{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004147 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004148
4149 be_unmap_pci_bars(adapter);
4150
4151 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004152 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4153 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004154
Sathya Perla5b8821b2011-08-02 19:57:44 +00004155 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004156 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004157 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4158 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004159}
4160
/* Set up everything needed to talk to the chip's control path: decode
 * the SLI_INTF register, map PCI BARs, allocate the FW mailbox and
 * RX-filter DMA buffers and initialize the command locks.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto ladder.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Read chip family and PF/VF identity from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be placed
	 * at a 16-byte aligned address, as the HW requires.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Pre-allocated, zeroed buffer for RX_FILTER commands */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space now so EEH/resume paths can restore it */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4219
4220static void be_stats_cleanup(struct be_adapter *adapter)
4221{
Sathya Perla3abcded2010-10-03 22:12:27 -07004222 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004223
4224 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004225 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4226 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004227}
4228
4229static int be_stats_init(struct be_adapter *adapter)
4230{
Sathya Perla3abcded2010-10-03 22:12:27 -07004231 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004232
Sathya Perlaca34fe32012-11-06 17:48:56 +00004233 if (lancer_chip(adapter))
4234 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4235 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004236 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004237 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004238 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004239 else
4240 /* ALL non-BE ASICs */
4241 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004242
Joe Perchesede23fa82013-08-26 22:45:23 -07004243 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4244 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004245 if (cmd->va == NULL)
4246 return -1;
4247 return 0;
4248}
4249
/* PCI remove callback: tear down the function in reverse order of
 * be_probe().  The adapter pointer can be NULL if probe failed before
 * pci_set_drvdata().
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* Mask interrupts for all ULPs before tearing anything down */
	be_intr_set(adapter, false);

	/* Stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4280
Sathya Perla39f1d942012-05-08 19:41:24 +00004281static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004282{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304283 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004284
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004285 status = be_cmd_get_cntl_attributes(adapter);
4286 if (status)
4287 return status;
4288
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004289 /* Must be a power of 2 or else MODULO will BUG_ON */
4290 adapter->be_get_temp_freq = 64;
4291
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304292 if (BEx_chip(adapter)) {
4293 level = be_cmd_get_fw_log_level(adapter);
4294 adapter->msg_enable =
4295 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4296 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004297
Sathya Perla92bf14a2013-08-27 16:57:32 +05304298 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004299 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004300}
4301
/* Try to recover a Lancer function after a HW/FW error: wait for the FW
 * to report ready, tear the adapter down completely, clear recorded
 * error state and rebuild via be_setup(), reopening the interface if it
 * was running.
 *
 * Returns 0 on success, -EAGAIN while FW is still provisioning
 * resources (caller retries), or another negative errno on failure.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4338
/* Delayed-work handler (1s period) that polls for HW/FW errors and, on
 * Lancer, drives adapter recovery.  The netdev is detached under rtnl
 * during recovery and re-attached only if recovery succeeded.
 * Reschedules itself unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4365
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, kicks off FW stats and die-temperature queries,
 * replenishes RX rings that starved on allocation failures, and adapts
 * EQ interrupt-delay settings.  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue the next stats request only after the previous completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq ticks (PF only) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4408
Sathya Perla257a3fe2013-06-14 15:54:51 +05304409/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004410static bool be_reset_required(struct be_adapter *adapter)
4411{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304412 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004413}
4414
Sathya Perlad3791422012-09-28 04:39:44 +00004415static char *mc_name(struct be_adapter *adapter)
4416{
4417 if (adapter->function_mode & FLEX10_MODE)
4418 return "FLEX10";
4419 else if (adapter->function_mode & VNIC_MODE)
4420 return "vNIC";
4421 else if (adapter->function_mode & UMC_ENABLED)
4422 return "UMC";
4423 else
4424 return "";
4425}
4426
/* "PF" or "VF" depending on the function's identity; for log messages */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4431
/* PCI probe: bring up a newly discovered BE/Lancer/Skyhawk NIC
 * function.  Enables the PCI device, allocates the netdev, maps BARs
 * and control structures, syncs with FW (wait-ready, optional FLR,
 * fw_init), provisions resources via be_setup() and finally registers
 * the net device and starts the recovery worker.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds in
 * reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled on it */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4553
/* Legacy PM suspend callback: arm wake-on-LAN if the user enabled it,
 * mask ULP interrupts, stop the recovery worker, quiesce the interface
 * and release all HW resources before powering the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4578
/* Legacy PM resume callback: re-enable the PCI device, wait for FW,
 * rebuild the adapter (be_setup), reopen the interface if it was
 * running, restart the recovery worker and disarm WoL.
 * Returns 0 on success or a negative errno from the early steps.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Re-allow interrupts for other ULPs on this function */
	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4620
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* adapter is NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	/* Stop all periodic work before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset quiesces all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4640
/* EEH/AER error-detected callback: quiesce the function (once — guarded
 * by adapter->eeh_error) and tell the PCI core whether a slot reset
 * should be attempted or the device is permanently lost.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4679
/* EEH slot-reset callback: re-enable and restore the device after the
 * slot was reset, then wait for FW readiness.  Returns RECOVERED so the
 * core proceeds to be_eeh_resume(), or DISCONNECT on failure.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4706
/* EEH resume callback: after a successful slot reset, reset and
 * re-initialize the FW interface, rebuild the adapter, reopen the
 * interface if it was running and restart the recovery worker.
 * On any failure the device is left detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4743
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4749
/* PCI driver registration: probe/remove, legacy PM hooks, shutdown and
 * EEH error handlers for all devices in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4760
4761static int __init be_init_module(void)
4762{
Joe Perches8e95a202009-12-03 07:58:21 +00004763 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4764 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004765 printk(KERN_WARNING DRV_NAME
4766 " : Module param rx_frag_size must be 2048/4096/8192."
4767 " Using 2048\n");
4768 rx_frag_size = 2048;
4769 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004770
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004771 return pci_register_driver(&be_driver);
4772}
4773module_init(be_init_module);
4774
/* Module unload: unregister the PCI driver (invokes be_remove() for
 * each bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);