blob: d34ea98ea2c45c5b22bd89efcdedc53469eda3ab [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR
 * One human-readable name per bit of the UE (unrecoverable error) status
 * low register; index == bit position. Trailing spaces in some entries are
 * preserved as-is since these strings are emitted verbatim in log messages.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR
 * Bit-indexed names for the UE status high register; the trailing
 * "Unknown" entries pad the table to the full 32-bit register width.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
/* Ring the RX queue doorbell: tell HW that 'posted' new RX buffers were
 * placed on queue 'qid'.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Make the descriptor writes visible to HW before the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
199
/* Ring the TX queue doorbell: tell HW that 'posted' new WRBs were placed
 * on the given TX object's queue. The doorbell offset is per-txo.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Make the WRB writes visible to HW before the doorbell */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
210
/* Ring the event queue doorbell: acknowledge 'num_popped' processed events
 * on EQ 'qid', optionally re-arming the EQ and/or clearing the interrupt.
 * Becomes a no-op while an EEH (PCI) error is pending.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* mark this as an event-queue doorbell */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
230
/* Ring the completion queue doorbell: acknowledge 'num_popped' processed
 * completions on CQ 'qid', optionally re-arming the CQ. No-op while an
 * EEH (PCI) error is pending. Non-static: used by other driver files.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
246
/* ndo_set_mac_address handler.
 * Programs the new MAC via the PMAC_ADD FW cmd, deletes the previously
 * programmed MAC, and then confirms activation by querying the FW for the
 * currently active MAC. Returns 0 on success; -EADDRNOTAVAIL for an
 * invalid address; -EPERM (or the FW status) when the change did not take
 * effect (e.g. a VF lacking the required privilege).
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0 (BE2) FW stats response into the driver's common
 * be_drv_stats layout, after converting it from LE to CPU endianness.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filter drops separately; fold both
	 * into the single driver counter
	 */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are reported per physical port in v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy the v1 (BE3/Skyhawk) FW stats response into the driver's common
 * be_drv_stats layout, after converting it from LE to CPU endianness.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 has a single combined address-filter counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
423
/* Copy the Lancer pport stats response into the driver's common
 * be_drv_stats layout, after converting it from LE to CPU endianness.
 * Only the low 32 bits (_lo) of Lancer's 64-bit counters are used.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* same HW counter feeds both fifo-overflow driver stats below */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan filter drops into one driver counter */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Jingoo Han4188e7d2013-08-05 18:02:02 +0900475static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the driver's error counters into the rtnl stats structure.
 * Per-queue u64 counters are read under the u64_stats seqcount retry loop
 * so 32-bit readers see consistent values.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until the counters are read without a concurrent
		 * writer update
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
/* Update per-TXQ software stats for one transmitted skb.
 * wrb_cnt: WRBs consumed; copied: bytes queued; gso_segs: segment count
 * (0 for non-GSO, counted as 1 pkt); stopped: queue was stopped by this tx.
 * The update is wrapped in u64_stats_update_begin/end so 32-bit readers
 * of these u64 counters see consistent values.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
/* Fill one TX WRB with the DMA address and length of a buffer fragment.
 * The length is masked to the WRB's frag_len field width; rsvd0 is
 * cleared explicitly since the WRB is not zeroed beforehand.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
Somnath Koturcc4ce022010-10-21 07:11:14 -0700654static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000657 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700658
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659 memset(hdr, 0, sizeof(*hdr));
660
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000663 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670 if (is_tcp_pkt(skb))
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674 }
675
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700676 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680 }
681
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
Sathya Perla3c8def92011-06-12 20:01:58 +0000706static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000707 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700709{
Sathya Perla7101e112010-03-22 20:41:12 +0000710 dma_addr_t busaddr;
711 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000712 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700713 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700714 struct be_eth_wrb *wrb;
715 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000716 bool map_single = false;
717 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700718
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719 hdr = queue_head_node(txq);
720 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000721 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700722
David S. Millerebc8d2a2009-06-09 01:01:31 -0700723 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700724 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000725 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000727 goto dma_err;
728 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, busaddr, len);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732 queue_head_inc(txq);
733 copied += len;
734 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700735
David S. Millerebc8d2a2009-06-09 01:01:31 -0700736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000737 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700738 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000739 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000740 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000741 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000742 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700743 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000744 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700745 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000747 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700748 }
749
750 if (dummy_wrb) {
751 wrb = queue_head_node(txq);
752 wrb_fill(wrb, 0, 0);
753 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754 queue_head_inc(txq);
755 }
756
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000757 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700758 be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000761dma_err:
762 txq->head = map_head;
763 while (copied) {
764 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000765 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000766 map_single = false;
767 copied -= wrb->frag_len;
768 queue_head_inc(txq);
769 }
770 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771}
772
Somnath Kotur93040ae2012-06-26 22:32:10 +0000773static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000774 struct sk_buff *skb,
775 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000776{
777 u16 vlan_tag = 0;
778
779 skb = skb_share_check(skb, GFP_ATOMIC);
780 if (unlikely(!skb))
781 return skb;
782
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000783 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530785
786 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
787 if (!vlan_tag)
788 vlan_tag = adapter->pvid;
789 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
790 * skip VLAN insertion
791 */
792 if (skip_hw_vlan)
793 *skip_hw_vlan = true;
794 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000795
796 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400797 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000798 if (unlikely(!skb))
799 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000800 skb->vlan_tci = 0;
801 }
802
803 /* Insert the outer VLAN, if any */
804 if (adapter->qnq_vid) {
805 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400806 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000807 if (unlikely(!skb))
808 return skb;
809 if (skip_hw_vlan)
810 *skip_hw_vlan = true;
811 }
812
Somnath Kotur93040ae2012-06-26 22:32:10 +0000813 return skb;
814}
815
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000816static bool be_ipv6_exthdr_check(struct sk_buff *skb)
817{
818 struct ethhdr *eh = (struct ethhdr *)skb->data;
819 u16 offset = ETH_HLEN;
820
821 if (eh->h_proto == htons(ETH_P_IPV6)) {
822 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
823
824 offset += sizeof(struct ipv6hdr);
825 if (ip6h->nexthdr != NEXTHDR_TCP &&
826 ip6h->nexthdr != NEXTHDR_UDP) {
827 struct ipv6_opt_hdr *ehdr =
828 (struct ipv6_opt_hdr *) (skb->data + offset);
829
830 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
831 if (ehdr->hdrlen == 0xff)
832 return true;
833 }
834 }
835 return false;
836}
837
838static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
839{
840 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
841}
842
/* True when this skb could hit the BE3 TX stall: BE3 chip and an IPv6
 * packet with the offending extension-header layout.
 */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
848
Sathya Perlaee9c7992013-05-22 23:04:55 +0000849static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
850 struct sk_buff *skb,
851 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000853 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000854 unsigned int eth_hdr_len;
855 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000856
Somnath Kotur48265662013-05-26 21:08:47 +0000857 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
858 * may cause a transmit stall on that port. So the work-around is to
859 * pad such packets to a 36-byte length.
860 */
861 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
862 if (skb_padto(skb, 36))
863 goto tx_drop;
864 skb->len = 36;
865 }
866
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000867 /* For padded packets, BE HW modifies tot_len field in IP header
868 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000869 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000870 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000871 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
872 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000873 if (skb->len <= 60 &&
874 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000875 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000876 ip = (struct iphdr *)ip_hdr(skb);
877 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
878 }
879
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000880 /* If vlan tag is already inlined in the packet, skip HW VLAN
881 * tagging in UMC mode
882 */
883 if ((adapter->function_mode & UMC_ENABLED) &&
884 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000885 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000886
Somnath Kotur93040ae2012-06-26 22:32:10 +0000887 /* HW has a bug wherein it will calculate CSUM for VLAN
888 * pkts even though it is disabled.
889 * Manually insert VLAN in pkt.
890 */
891 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000892 vlan_tx_tag_present(skb)) {
893 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000894 if (unlikely(!skb))
895 goto tx_drop;
896 }
897
898 /* HW may lockup when VLAN HW tagging is requested on
899 * certain ipv6 packets. Drop such pkts if the HW workaround to
900 * skip HW tagging is not enabled by FW.
901 */
902 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000903 (adapter->pvid || adapter->qnq_vid) &&
904 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000905 goto tx_drop;
906
907 /* Manual VLAN tag insertion to prevent:
908 * ASIC lockup when the ASIC inserts VLAN tag into
909 * certain ipv6 packets. Insert VLAN tags in driver,
910 * and set event, completion, vlan bits accordingly
911 * in the Tx WRB.
912 */
913 if (be_ipv6_tx_stall_chk(adapter, skb) &&
914 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000915 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000916 if (unlikely(!skb))
917 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000918 }
919
Sathya Perlaee9c7992013-05-22 23:04:55 +0000920 return skb;
921tx_drop:
922 dev_kfree_skb_any(skb);
923 return NULL;
924}
925
926static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
927{
928 struct be_adapter *adapter = netdev_priv(netdev);
929 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
930 struct be_queue_info *txq = &txo->q;
931 bool dummy_wrb, stopped = false;
932 u32 wrb_cnt = 0, copied = 0;
933 bool skip_hw_vlan = false;
934 u32 start = txq->head;
935
936 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
937 if (!skb)
938 return NETDEV_TX_OK;
939
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000940 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700941
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000942 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
943 skip_hw_vlan);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000944 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000945 int gso_segs = skb_shinfo(skb)->gso_segs;
946
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000947 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000948 BUG_ON(txo->sent_skb_list[start]);
949 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700950
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000951 /* Ensure txq has space for the next skb; Else stop the queue
952 * *BEFORE* ringing the tx doorbell, so that we serialze the
953 * tx compls of the current transmit which'll wake up the queue
954 */
Sathya Perla7101e112010-03-22 20:41:12 +0000955 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000956 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
957 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000958 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000959 stopped = true;
960 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700961
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000962 be_txq_notify(adapter, txo, wrb_cnt);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000963
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000964 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000965 } else {
966 txq->head = start;
967 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700968 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700969 return NETDEV_TX_OK;
970}
971
972static int be_change_mtu(struct net_device *netdev, int new_mtu)
973{
974 struct be_adapter *adapter = netdev_priv(netdev);
975 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000976 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
977 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700978 dev_info(&adapter->pdev->dev,
979 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000980 BE_MIN_MTU,
981 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700982 return -EINVAL;
983 }
984 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
985 netdev->mtu, new_mtu);
986 netdev->mtu = new_mtu;
987 return 0;
988}
989
990/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000991 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
992 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700993 */
Sathya Perla10329df2012-06-05 19:37:18 +0000994static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700995{
Sathya Perla10329df2012-06-05 19:37:18 +0000996 u16 vids[BE_NUM_VLANS_SUPPORTED];
997 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000998 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000999
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001000 /* No need to further configure vids if in promiscuous mode */
1001 if (adapter->promiscuous)
1002 return 0;
1003
Sathya Perla92bf14a2013-08-27 16:57:32 +05301004 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001005 goto set_vlan_promisc;
1006
1007 /* Construct VLAN Table to give to HW */
1008 for (i = 0; i < VLAN_N_VID; i++)
1009 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +00001010 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001011
1012 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +00001013 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001014
1015 /* Set to VLAN promisc mode as setting VLAN filter failed */
1016 if (status) {
1017 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1018 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
1019 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001020 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001021
Sathya Perlab31c50a2009-09-17 10:30:13 -07001022 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001023
1024set_vlan_promisc:
1025 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1026 NULL, 0, 1, 1);
1027 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001028}
1029
Patrick McHardy80d5c362013-04-19 02:04:28 +00001030static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001031{
1032 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001033 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001034
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001035 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001036 status = -EINVAL;
1037 goto ret;
1038 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001039
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001040 /* Packets with VID 0 are always received by Lancer by default */
1041 if (lancer_chip(adapter) && vid == 0)
1042 goto ret;
1043
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001044 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301045 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001046 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001047
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001048 if (!status)
1049 adapter->vlans_added++;
1050 else
1051 adapter->vlan_tag[vid] = 0;
1052ret:
1053 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001054}
1055
Patrick McHardy80d5c362013-04-19 02:04:28 +00001056static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001057{
1058 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001059 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001060
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001061 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001062 status = -EINVAL;
1063 goto ret;
1064 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001065
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001066 /* Packets with VID 0 are always received by Lancer by default */
1067 if (lancer_chip(adapter) && vid == 0)
1068 goto ret;
1069
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001070 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301071 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001072 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001073
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001074 if (!status)
1075 adapter->vlans_added--;
1076 else
1077 adapter->vlan_tag[vid] = 1;
1078ret:
1079 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001080}
1081
Sathya Perlaa54769f2011-10-24 02:45:00 +00001082static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001083{
1084 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001085 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086
1087 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001088 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001089 adapter->promiscuous = true;
1090 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001091 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001092
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001093 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001094 if (adapter->promiscuous) {
1095 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001096 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001097
1098 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001099 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001100 }
1101
Sathya Perlae7b909a2009-11-22 22:01:10 +00001102 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001103 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla92bf14a2013-08-27 16:57:32 +05301104 netdev_mc_count(netdev) > be_max_mc(adapter)) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001105 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001106 goto done;
1107 }
1108
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001109 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1110 struct netdev_hw_addr *ha;
1111 int i = 1; /* First slot is claimed by the Primary MAC */
1112
1113 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1114 be_cmd_pmac_del(adapter, adapter->if_handle,
1115 adapter->pmac_id[i], 0);
1116 }
1117
Sathya Perla92bf14a2013-08-27 16:57:32 +05301118 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001119 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1120 adapter->promiscuous = true;
1121 goto done;
1122 }
1123
1124 netdev_for_each_uc_addr(ha, adapter->netdev) {
1125 adapter->uc_macs++; /* First slot is for Primary MAC */
1126 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1127 adapter->if_handle,
1128 &adapter->pmac_id[adapter->uc_macs], 0);
1129 }
1130 }
1131
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001132 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1133
1134 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1135 if (status) {
1136 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1137 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1138 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1139 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001140done:
1141 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142}
1143
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001144static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1145{
1146 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001147 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001148 int status;
1149
Sathya Perla11ac75e2011-12-13 00:58:50 +00001150 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001151 return -EPERM;
1152
Sathya Perla11ac75e2011-12-13 00:58:50 +00001153 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001154 return -EINVAL;
1155
Sathya Perla3175d8c2013-07-23 15:25:03 +05301156 if (BEx_chip(adapter)) {
1157 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1158 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001159
Sathya Perla11ac75e2011-12-13 00:58:50 +00001160 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1161 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301162 } else {
1163 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1164 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001165 }
1166
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001167 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001168 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1169 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001170 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001171 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001172
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001173 return status;
1174}
1175
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001176static int be_get_vf_config(struct net_device *netdev, int vf,
1177 struct ifla_vf_info *vi)
1178{
1179 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001180 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001181
Sathya Perla11ac75e2011-12-13 00:58:50 +00001182 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001183 return -EPERM;
1184
Sathya Perla11ac75e2011-12-13 00:58:50 +00001185 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001186 return -EINVAL;
1187
1188 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001189 vi->tx_rate = vf_cfg->tx_rate;
1190 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001191 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001192 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001193
1194 return 0;
1195}
1196
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001197static int be_set_vf_vlan(struct net_device *netdev,
1198 int vf, u16 vlan, u8 qos)
1199{
1200 struct be_adapter *adapter = netdev_priv(netdev);
1201 int status = 0;
1202
Sathya Perla11ac75e2011-12-13 00:58:50 +00001203 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001204 return -EPERM;
1205
Sathya Perla11ac75e2011-12-13 00:58:50 +00001206 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001207 return -EINVAL;
1208
1209 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001210 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1211 /* If this is new value, program it. Else skip. */
1212 adapter->vf_cfg[vf].vlan_tag = vlan;
1213
1214 status = be_cmd_set_hsw_config(adapter, vlan,
1215 vf + 1, adapter->vf_cfg[vf].if_handle);
1216 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001217 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001218 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001219 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001220 vlan = adapter->vf_cfg[vf].def_vid;
1221 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1222 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001223 }
1224
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001225
1226 if (status)
1227 dev_info(&adapter->pdev->dev,
1228 "VLAN %d config on VF %d failed\n", vlan, vf);
1229 return status;
1230}
1231
Ajit Khapardee1d18732010-07-23 01:52:13 +00001232static int be_set_vf_tx_rate(struct net_device *netdev,
1233 int vf, int rate)
1234{
1235 struct be_adapter *adapter = netdev_priv(netdev);
1236 int status = 0;
1237
Sathya Perla11ac75e2011-12-13 00:58:50 +00001238 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001239 return -EPERM;
1240
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001241 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001242 return -EINVAL;
1243
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001244 if (rate < 100 || rate > 10000) {
1245 dev_err(&adapter->pdev->dev,
1246 "tx rate must be between 100 and 10000 Mbps\n");
1247 return -EINVAL;
1248 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001249
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001250 if (lancer_chip(adapter))
1251 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1252 else
1253 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001254
1255 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001256 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001257 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001258 else
1259 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001260 return status;
1261}
1262
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001263static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001264{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001265 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001266 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001267 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001268 u64 pkts;
1269 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001270
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001271 if (!eqo->enable_aic) {
1272 eqd = eqo->eqd;
1273 goto modify_eqd;
1274 }
1275
1276 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001277 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001278
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001279 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1280
Sathya Perla4097f662009-03-24 16:40:13 -07001281 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001282 if (time_before(now, stats->rx_jiffies)) {
1283 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001284 return;
1285 }
1286
Sathya Perlaac124ff2011-07-25 19:10:14 +00001287 /* Update once a second */
1288 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001289 return;
1290
Sathya Perlaab1594e2011-07-25 19:10:15 +00001291 do {
1292 start = u64_stats_fetch_begin_bh(&stats->sync);
1293 pkts = stats->rx_pkts;
1294 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1295
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001296 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001297 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001298 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001299 eqd = (stats->rx_pps / 110000) << 3;
1300 eqd = min(eqd, eqo->max_eqd);
1301 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001302 if (eqd < 10)
1303 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001304
1305modify_eqd:
1306 if (eqd != eqo->cur_eqd) {
1307 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1308 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001309 }
Sathya Perla4097f662009-03-24 16:40:13 -07001310}
1311
Sathya Perla3abcded2010-10-03 22:12:27 -07001312static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001313 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001314{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001315 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001316
Sathya Perlaab1594e2011-07-25 19:10:15 +00001317 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001318 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001319 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001320 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001321 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001322 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001323 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001324 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001325 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326}
1327
Sathya Perla2e588f82011-03-11 02:49:26 +00001328static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001329{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001330 /* L4 checksum is not reliable for non TCP/UDP packets.
1331 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001332 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1333 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001334}
1335
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001336static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1337 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001338{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001339 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001340 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001341 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001342
Sathya Perla3abcded2010-10-03 22:12:27 -07001343 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344 BUG_ON(!rx_page_info->page);
1345
Ajit Khaparde205859a2010-02-09 01:34:21 +00001346 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001347 dma_unmap_page(&adapter->pdev->dev,
1348 dma_unmap_addr(rx_page_info, bus),
1349 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001350 rx_page_info->last_page_user = false;
1351 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352
1353 atomic_dec(&rxq->used);
1354 return rx_page_info;
1355}
1356
1357/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001358static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360{
Sathya Perla3abcded2010-10-03 22:12:27 -07001361 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001363 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001365 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001367 put_page(page_info->page);
1368 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001369 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370 }
1371}
1372
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny frames (<= BE_HDR_LEN) are copied entirely into the skb linear
 * area; larger frames get only the Ethernet header copied, with the
 * remaining data attached as page fragments.  Fragments that share a
 * physical page are coalesced into a single skb frag slot.
 * Consumes rxcp->num_rcvd rxq entries, advancing rxcp->rxq_idx.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header; payload stays in the page */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page reference has been handed to the skb (or released) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1449
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001450/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001451static void be_rx_compl_process(struct be_rx_obj *rxo,
1452 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001454 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001455 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001457
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001458 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001459 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001460 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001461 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462 return;
1463 }
1464
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001465 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001467 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001468 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001469 else
1470 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001472 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001473 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001474 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001475 skb->rxhash = rxcp->rss_hash;
1476
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001477
Jiri Pirko343e43c2011-08-25 02:50:51 +00001478 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001479 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001480
1481 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482}
1483
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb via napi_get_frags()/napi_gro_frags(),
 * coalescing fragments that share a physical page into one frag slot.
 * Consumes rxcp->num_rcvd rxq entries, advancing rxcp->rxq_idx.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame and recycle buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps as u16) and is bumped to 0 on the first
	 * iteration by the (i == 0) branch below */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken when HW validated the checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1540
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001541static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1542 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543{
Sathya Perla2e588f82011-03-11 02:49:26 +00001544 rxcp->pkt_size =
1545 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1546 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1547 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1548 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001549 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001550 rxcp->ip_csum =
1551 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1552 rxcp->l4_csum =
1553 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1554 rxcp->ipv6 =
1555 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1556 rxcp->rxq_idx =
1557 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1558 rxcp->num_rcvd =
1559 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1560 rxcp->pkt_type =
1561 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001562 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001563 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001564 if (rxcp->vlanf) {
1565 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001566 compl);
1567 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1568 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001569 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001570 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001571}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572
/* Parse a v0 (legacy, non-BE3-native) Rx completion entry into the sw
 * rxcp struct.  Must extract every field the Rx path consumes, because
 * rxcp (&rxo->rxcp) is reused for each completion.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vtm/vlan_tag are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1606
/* Return the next valid Rx completion from rxo's CQ, parsed into the
 * per-queue rxcp struct, or NULL if none is pending.  Applies vlan and
 * ip_frag fixups and clears the entry's valid bit so it is consumed
 * exactly once.  The returned pointer is &rxo->rxcp, overwritten on the
 * next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum result is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag if the vlan is not configured on the host */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1649
Eric Dumazet1829b082011-03-01 05:48:12 +00001650static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001652 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001653
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001655 gfp |= __GFP_COMP;
1656 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001657}
1658
1659/*
1660 * Allocate a page, split it to fragments of size rx_frag_size and post as
1661 * receive buffers to BE
1662 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001663static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001664{
Sathya Perla3abcded2010-10-03 22:12:27 -07001665 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001666 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001667 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001668 struct page *pagep = NULL;
1669 struct be_eth_rx_d *rxd;
1670 u64 page_dmaaddr = 0, frag_dmaaddr;
1671 u32 posted, page_offset = 0;
1672
Sathya Perla3abcded2010-10-03 22:12:27 -07001673 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1675 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001676 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001678 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679 break;
1680 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001681 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1682 0, adapter->big_page_size,
1683 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684 page_info->page_offset = 0;
1685 } else {
1686 get_page(pagep);
1687 page_info->page_offset = page_offset + rx_frag_size;
1688 }
1689 page_offset = page_info->page_offset;
1690 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001691 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1693
1694 rxd = queue_head_node(rxq);
1695 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1696 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697
1698 /* Any space left in the current big page for another frag? */
1699 if ((page_offset + rx_frag_size + rx_frag_size) >
1700 adapter->big_page_size) {
1701 pagep = NULL;
1702 page_info->last_page_user = true;
1703 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001704
1705 prev_page_info = page_info;
1706 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001707 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708 }
1709 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001710 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001711
1712 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001714 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001715 } else if (atomic_read(&rxq->used) == 0) {
1716 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001717 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719}
1720
Sathya Perla5fb379e2009-06-18 00:02:59 +00001721static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001723 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1724
1725 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1726 return NULL;
1727
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001728 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001729 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1730
1731 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1732
1733 queue_tail_inc(tx_cq);
1734 return txcp;
1735}
1736
/* Reclaim the wrbs (and the skb) of one transmitted packet whose last
 * wrb index is last_index.  Walks txq from its tail, unmapping each
 * data frag (the header wrb is only unmapped when the skb has linear
 * data).  Returns the number of wrbs consumed, including the hdr wrb;
 * the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* only the first data wrb may carry the mapped skb header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1768
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001769/* Return the number of events in the event queue */
1770static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001771{
1772 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001773 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001774
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001775 do {
1776 eqe = queue_tail_node(&eqo->q);
1777 if (eqe->evt == 0)
1778 break;
1779
1780 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001781 eqe->evt = 0;
1782 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001783 queue_tail_inc(&eqo->q);
1784 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001785
1786 return num;
1787}
1788
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001789/* Leaves the EQ is disarmed state */
1790static void be_eq_clean(struct be_eq_obj *eqo)
1791{
1792 int num = events_get(eqo);
1793
1794 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1795}
1796
/* Drain rxo's completion queue and free all posted-but-unused Rx
 * buffers; called on queue teardown.  Leaves the CQ unarmed and the
 * rxq head/tail reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1847
/* Drain Tx completions on all Tx queues (waiting up to ~200ms), then
 * forcibly free any posted skbs whose completions never arrived;
 * called on device close/teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* ack the consumed completions and reclaim wrbs */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* compute the last wrb index of this skb, then
			 * reclaim it as if a completion had arrived */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1906
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001907static void be_evt_queues_destroy(struct be_adapter *adapter)
1908{
1909 struct be_eq_obj *eqo;
1910 int i;
1911
1912 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001913 if (eqo->q.created) {
1914 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001915 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001916 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001917 be_queue_free(adapter, &eqo->q);
1918 }
1919}
1920
1921static int be_evt_queues_create(struct be_adapter *adapter)
1922{
1923 struct be_queue_info *eq;
1924 struct be_eq_obj *eqo;
1925 int i, rc;
1926
Sathya Perla92bf14a2013-08-27 16:57:32 +05301927 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1928 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001929
1930 for_all_evt_queues(adapter, eqo, i) {
1931 eqo->adapter = adapter;
1932 eqo->tx_budget = BE_TX_BUDGET;
1933 eqo->idx = i;
1934 eqo->max_eqd = BE_MAX_EQD;
1935 eqo->enable_aic = true;
1936
1937 eq = &eqo->q;
1938 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1939 sizeof(struct be_eq_entry));
1940 if (rc)
1941 return rc;
1942
Sathya Perlaf2f781a2013-08-27 16:57:30 +05301943 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001944 if (rc)
1945 return rc;
1946 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001947 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001948}
1949
Sathya Perla5fb379e2009-06-18 00:02:59 +00001950static void be_mcc_queues_destroy(struct be_adapter *adapter)
1951{
1952 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001953
Sathya Perla8788fdc2009-07-27 22:52:03 +00001954 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001955 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001956 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001957 be_queue_free(adapter, q);
1958
Sathya Perla8788fdc2009-07-27 22:52:03 +00001959 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001960 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001961 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001962 be_queue_free(adapter, q);
1963}
1964
1965/* Must be called only after TX qs are created as MCC shares TX EQ */
1966static int be_mcc_queues_create(struct be_adapter *adapter)
1967{
1968 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001969
Sathya Perla8788fdc2009-07-27 22:52:03 +00001970 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001971 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001972 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001973 goto err;
1974
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001975 /* Use the default EQ for MCC completions */
1976 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001977 goto mcc_cq_free;
1978
Sathya Perla8788fdc2009-07-27 22:52:03 +00001979 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001980 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1981 goto mcc_cq_destroy;
1982
Sathya Perla8788fdc2009-07-27 22:52:03 +00001983 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001984 goto mcc_q_free;
1985
1986 return 0;
1987
1988mcc_q_free:
1989 be_queue_free(adapter, q);
1990mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001991 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001992mcc_cq_free:
1993 be_queue_free(adapter, cq);
1994err:
1995 return -1;
1996}
1997
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001998static void be_tx_queues_destroy(struct be_adapter *adapter)
1999{
2000 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002001 struct be_tx_obj *txo;
2002 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003
Sathya Perla3c8def92011-06-12 20:01:58 +00002004 for_all_tx_queues(adapter, txo, i) {
2005 q = &txo->q;
2006 if (q->created)
2007 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2008 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002009
Sathya Perla3c8def92011-06-12 20:01:58 +00002010 q = &txo->cq;
2011 if (q->created)
2012 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2013 be_queue_free(adapter, q);
2014 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015}
2016
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002017static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002019 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002020 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302021 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022
Sathya Perla92bf14a2013-08-27 16:57:32 +05302023 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002024 if (adapter->num_tx_qs != MAX_TX_QS) {
2025 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00002026 netif_set_real_num_tx_queues(adapter->netdev,
2027 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002028 rtnl_unlock();
2029 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00002030
Sathya Perla3c8def92011-06-12 20:01:58 +00002031 for_all_tx_queues(adapter, txo, i) {
2032 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002033 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2034 sizeof(struct be_eth_tx_compl));
2035 if (status)
2036 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002037
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002038 /* If num_evt_qs is less than num_tx_qs, then more than
2039 * one txq share an eq
2040 */
2041 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2042 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2043 if (status)
2044 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00002045 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002046 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002047}
2048
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002049static int be_tx_qs_create(struct be_adapter *adapter)
2050{
2051 struct be_tx_obj *txo;
2052 int i, status;
2053
2054 for_all_tx_queues(adapter, txo, i) {
2055 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2056 sizeof(struct be_eth_wrb));
2057 if (status)
2058 return status;
2059
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002060 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002061 if (status)
2062 return status;
2063 }
2064
Sathya Perlad3791422012-09-28 04:39:44 +00002065 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2066 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002067 return 0;
2068}
2069
2070static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071{
2072 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002073 struct be_rx_obj *rxo;
2074 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002075
Sathya Perla3abcded2010-10-03 22:12:27 -07002076 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002077 q = &rxo->cq;
2078 if (q->created)
2079 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2080 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002081 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002082}
2083
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002084static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002085{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002086 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002087 struct be_rx_obj *rxo;
2088 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002089
Sathya Perla92bf14a2013-08-27 16:57:32 +05302090 /* We can create as many RSS rings as there are EQs. */
2091 adapter->num_rx_qs = adapter->num_evt_qs;
2092
2093 /* We'll use RSS only if atleast 2 RSS rings are supported.
2094 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002095 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302096 if (adapter->num_rx_qs > 1)
2097 adapter->num_rx_qs++;
2098
Sathya Perla7f640062012-06-05 19:37:20 +00002099 if (adapter->num_rx_qs != MAX_RX_QS) {
2100 rtnl_lock();
2101 netif_set_real_num_rx_queues(adapter->netdev,
2102 adapter->num_rx_qs);
2103 rtnl_unlock();
2104 }
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002105
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002106 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002107 for_all_rx_queues(adapter, rxo, i) {
2108 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002109 cq = &rxo->cq;
2110 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2111 sizeof(struct be_eth_rx_compl));
2112 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002113 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002114
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002115 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2116 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002117 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002118 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002119 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002120
Sathya Perlad3791422012-09-28 04:39:44 +00002121 dev_info(&adapter->pdev->dev,
2122 "created %d RSS queue(s) and 1 default RX queue\n",
2123 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002124 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002125}
2126
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002127static irqreturn_t be_intx(int irq, void *dev)
2128{
Sathya Perlae49cc342012-11-27 19:50:02 +00002129 struct be_eq_obj *eqo = dev;
2130 struct be_adapter *adapter = eqo->adapter;
2131 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002133 /* IRQ is not expected when NAPI is scheduled as the EQ
2134 * will not be armed.
2135 * But, this can happen on Lancer INTx where it takes
2136 * a while to de-assert INTx or in BE2 where occasionaly
2137 * an interrupt may be raised even when EQ is unarmed.
2138 * If NAPI is already scheduled, then counting & notifying
2139 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002140 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002141 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002142 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002143 __napi_schedule(&eqo->napi);
2144 if (num_evts)
2145 eqo->spurious_intr = 0;
2146 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002147 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002148
2149 /* Return IRQ_HANDLED only for the the first spurious intr
2150 * after a valid intr to stop the kernel from branding
2151 * this irq as a bad one!
2152 */
2153 if (num_evts || eqo->spurious_intr++ == 0)
2154 return IRQ_HANDLED;
2155 else
2156 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002157}
2158
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002159static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002160{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002161 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002162
Sathya Perla0b545a62012-11-23 00:27:18 +00002163 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2164 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002165 return IRQ_HANDLED;
2166}
2167
Sathya Perla2e588f82011-03-11 02:49:26 +00002168static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169{
Somnath Koture38b1702013-05-29 22:55:56 +00002170 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002171}
2172
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002173static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2174 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175{
Sathya Perla3abcded2010-10-03 22:12:27 -07002176 struct be_adapter *adapter = rxo->adapter;
2177 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002178 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179 u32 work_done;
2180
2181 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002182 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183 if (!rxcp)
2184 break;
2185
Sathya Perla12004ae2011-08-02 19:57:46 +00002186 /* Is it a flush compl that has no data */
2187 if (unlikely(rxcp->num_rcvd == 0))
2188 goto loop_continue;
2189
2190 /* Discard compl with partial DMA Lancer B0 */
2191 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002192 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002193 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002194 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002195
Sathya Perla12004ae2011-08-02 19:57:46 +00002196 /* On BE drop pkts that arrive due to imperfect filtering in
2197 * promiscuous mode on some skews
2198 */
2199 if (unlikely(rxcp->port != adapter->port_num &&
2200 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002201 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002202 goto loop_continue;
2203 }
2204
2205 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002206 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002207 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002208 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002209loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002210 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002211 }
2212
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002213 if (work_done) {
2214 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002215
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002216 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2217 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002218 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002219
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002220 return work_done;
2221}
2222
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2224 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002227 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002229 for (work_done = 0; work_done < budget; work_done++) {
2230 txcp = be_tx_compl_get(&txo->cq);
2231 if (!txcp)
2232 break;
2233 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002234 AMAP_GET_BITS(struct amap_eth_tx_compl,
2235 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002236 }
2237
2238 if (work_done) {
2239 be_cq_notify(adapter, txo->cq.id, true, work_done);
2240 atomic_sub(num_wrbs, &txo->q.used);
2241
2242 /* As Tx wrbs have been freed up, wake up netdev queue
2243 * if it was stopped due to lack of tx wrbs. */
2244 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2245 atomic_read(&txo->q.used) < txo->q.len / 2) {
2246 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002247 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002248
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002249 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2250 tx_stats(txo)->tx_compl += work_done;
2251 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2252 }
2253 return (work_done < budget); /* Done */
2254}
Sathya Perla3c8def92011-06-12 20:01:58 +00002255
Jingoo Han4188e7d2013-08-05 18:02:02 +09002256static int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002257{
2258 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2259 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002260 int max_work = 0, work, i, num_evts;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002261 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002262
Sathya Perla0b545a62012-11-23 00:27:18 +00002263 num_evts = events_get(eqo);
2264
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002265 /* Process all TXQs serviced by this EQ */
2266 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2267 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2268 eqo->tx_budget, i);
2269 if (!tx_done)
2270 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271 }
2272
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002273 /* This loop will iterate twice for EQ0 in which
2274 * completions of the last RXQ (default one) are also processed
2275 * For other EQs the loop iterates only once
2276 */
2277 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2278 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2279 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002280 }
2281
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002282 if (is_mcc_eqo(eqo))
2283 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002284
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002285 if (max_work < budget) {
2286 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002287 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002288 } else {
2289 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002290 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002291 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293}
2294
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002295void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002296{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002297 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2298 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002299 u32 i;
2300
Sathya Perlad23e9462012-12-17 19:38:51 +00002301 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002302 return;
2303
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002304 if (lancer_chip(adapter)) {
2305 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2306 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2307 sliport_err1 = ioread32(adapter->db +
2308 SLIPORT_ERROR1_OFFSET);
2309 sliport_err2 = ioread32(adapter->db +
2310 SLIPORT_ERROR2_OFFSET);
2311 }
2312 } else {
2313 pci_read_config_dword(adapter->pdev,
2314 PCICFG_UE_STATUS_LOW, &ue_lo);
2315 pci_read_config_dword(adapter->pdev,
2316 PCICFG_UE_STATUS_HIGH, &ue_hi);
2317 pci_read_config_dword(adapter->pdev,
2318 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2319 pci_read_config_dword(adapter->pdev,
2320 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002321
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002322 ue_lo = (ue_lo & ~ue_lo_mask);
2323 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002324 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002325
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002326 /* On certain platforms BE hardware can indicate spurious UEs.
2327 * Allow the h/w to stop working completely in case of a real UE.
2328 * Hence not setting the hw_error for UE detection.
2329 */
2330 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002331 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002332 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002333 "Error detected in the card\n");
2334 }
2335
2336 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2337 dev_err(&adapter->pdev->dev,
2338 "ERR: sliport status 0x%x\n", sliport_status);
2339 dev_err(&adapter->pdev->dev,
2340 "ERR: sliport error1 0x%x\n", sliport_err1);
2341 dev_err(&adapter->pdev->dev,
2342 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002343 }
2344
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002345 if (ue_lo) {
2346 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2347 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002348 dev_err(&adapter->pdev->dev,
2349 "UE: %s bit set\n", ue_status_low_desc[i]);
2350 }
2351 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002352
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002353 if (ue_hi) {
2354 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2355 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002356 dev_err(&adapter->pdev->dev,
2357 "UE: %s bit set\n", ue_status_hi_desc[i]);
2358 }
2359 }
2360
2361}
2362
Sathya Perla8d56ff12009-11-22 22:02:26 +00002363static void be_msix_disable(struct be_adapter *adapter)
2364{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002365 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002366 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002367 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002368 }
2369}
2370
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002371static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002372{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302373 int i, status, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002374 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002375
Sathya Perla92bf14a2013-08-27 16:57:32 +05302376 /* If RoCE is supported, program the max number of NIC vectors that
2377 * may be configured via set-channels, along with vectors needed for
2378 * RoCe. Else, just program the number we'll use initially.
2379 */
2380 if (be_roce_supported(adapter))
2381 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2382 2 * num_online_cpus());
2383 else
2384 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002385
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002386 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002387 adapter->msix_entries[i].entry = i;
2388
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002389 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002390 if (status == 0) {
2391 goto done;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302392 } else if (status >= MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002393 num_vec = status;
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002394 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2395 num_vec);
2396 if (!status)
Sathya Perla3abcded2010-10-03 22:12:27 -07002397 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002398 }
Sathya Perlad3791422012-09-28 04:39:44 +00002399
2400 dev_warn(dev, "MSIx enable failed\n");
Sathya Perla92bf14a2013-08-27 16:57:32 +05302401
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002402 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2403 if (!be_physfn(adapter))
2404 return status;
2405 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002406done:
Sathya Perla92bf14a2013-08-27 16:57:32 +05302407 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2408 adapter->num_msix_roce_vec = num_vec / 2;
2409 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2410 adapter->num_msix_roce_vec);
2411 }
2412
2413 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2414
2415 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2416 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002417 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002418}
2419
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002420static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002421 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002422{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302423 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002424}
2425
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<N>".
 * On failure, frees every IRQ registered so far (in reverse order) and
 * disables MSI-x before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs for EQs [0, i-1] in reverse order */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2449
2450static int be_irq_register(struct be_adapter *adapter)
2451{
2452 struct net_device *netdev = adapter->netdev;
2453 int status;
2454
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002455 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002456 status = be_msix_register(adapter);
2457 if (status == 0)
2458 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002459 /* INTx is not supported for VF */
2460 if (!be_physfn(adapter))
2461 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002462 }
2463
Sathya Perlae49cc342012-11-27 19:50:02 +00002464 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002465 netdev->irq = adapter->pdev->irq;
2466 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002467 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002468 if (status) {
2469 dev_err(&adapter->pdev->dev,
2470 "INTx request IRQ failed - err %d\n", status);
2471 return status;
2472 }
2473done:
2474 adapter->isr_registered = true;
2475 return 0;
2476}
2477
2478static void be_irq_unregister(struct be_adapter *adapter)
2479{
2480 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002481 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002482 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002483
2484 if (!adapter->isr_registered)
2485 return;
2486
2487 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002488 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002489 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002490 goto done;
2491 }
2492
2493 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002494 for_all_evt_queues(adapter, eqo, i)
2495 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002496
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002497done:
2498 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499}
2500
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002501static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002502{
2503 struct be_queue_info *q;
2504 struct be_rx_obj *rxo;
2505 int i;
2506
2507 for_all_rx_queues(adapter, rxo, i) {
2508 q = &rxo->q;
2509 if (q->created) {
2510 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002511 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002512 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002513 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002514 }
2515}
2516
Sathya Perla889cd4b2010-05-30 23:33:45 +00002517static int be_close(struct net_device *netdev)
2518{
2519 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002520 struct be_eq_obj *eqo;
2521 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002522
Parav Pandit045508a2012-03-26 14:27:13 +00002523 be_roce_dev_close(adapter);
2524
Somnath Kotur04d3d622013-05-02 03:36:55 +00002525 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2526 for_all_evt_queues(adapter, eqo, i)
2527 napi_disable(&eqo->napi);
2528 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2529 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002530
2531 be_async_mcc_disable(adapter);
2532
2533 /* Wait for all pending tx completions to arrive so that
2534 * all tx skbs are freed.
2535 */
Sathya Perlafba87552013-05-08 02:05:50 +00002536 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302537 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002538
2539 be_rx_qs_destroy(adapter);
2540
2541 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002542 if (msix_enabled(adapter))
2543 synchronize_irq(be_msix_vec_get(adapter, eqo));
2544 else
2545 synchronize_irq(netdev->irq);
2546 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002547 }
2548
Sathya Perla889cd4b2010-05-30 23:33:45 +00002549 be_irq_unregister(adapter);
2550
Sathya Perla482c9e72011-06-29 23:33:17 +00002551 return 0;
2552}
2553
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002554static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002555{
2556 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002557 int rc, i, j;
2558 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002559
2560 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002561 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2562 sizeof(struct be_eth_rx_d));
2563 if (rc)
2564 return rc;
2565 }
2566
2567 /* The FW would like the default RXQ to be created first */
2568 rxo = default_rxo(adapter);
2569 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2570 adapter->if_handle, false, &rxo->rss_id);
2571 if (rc)
2572 return rc;
2573
2574 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002575 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002576 rx_frag_size, adapter->if_handle,
2577 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002578 if (rc)
2579 return rc;
2580 }
2581
2582 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002583 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2584 for_all_rss_queues(adapter, rxo, i) {
2585 if ((j + i) >= 128)
2586 break;
2587 rsstable[j + i] = rxo->rss_id;
2588 }
2589 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002590 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2591 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2592
2593 if (!BEx_chip(adapter))
2594 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2595 RSS_ENABLE_UDP_IPV6;
2596
2597 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2598 128);
2599 if (rc) {
2600 adapter->rss_flags = 0;
Sathya Perla482c9e72011-06-29 23:33:17 +00002601 return rc;
Suresh Reddy594ad542013-04-25 23:03:20 +00002602 }
Sathya Perla482c9e72011-06-29 23:33:17 +00002603 }
2604
2605 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002606 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002607 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002608 return 0;
2609}
2610
/* ndo_open handler: bring the interface up.
 *
 * Creates RX queues, registers IRQs, re-arms all CQs, enables async MCC
 * processing and NAPI, queries/propagates link state and finally starts
 * the TX queues. Any failure unwinds via be_close().
 *
 * Returns 0 on success, -EIO on any setup failure.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Re-arm completion queues so events are delivered */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	/* Tell be_close() that NAPI needs disabling on teardown */
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2653
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002654static int be_setup_wol(struct be_adapter *adapter, bool enable)
2655{
2656 struct be_dma_mem cmd;
2657 int status = 0;
2658 u8 mac[ETH_ALEN];
2659
2660 memset(mac, 0, ETH_ALEN);
2661
2662 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002663 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002664 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002665 if (cmd.va == NULL)
2666 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002667
2668 if (enable) {
2669 status = pci_write_config_dword(adapter->pdev,
2670 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2671 if (status) {
2672 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002673 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002674 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2675 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002676 return status;
2677 }
2678 status = be_cmd_enable_magic_wol(adapter,
2679 adapter->netdev->dev_addr, &cmd);
2680 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2681 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2682 } else {
2683 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2684 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2685 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2686 }
2687
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002688 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002689 return status;
2690}
2691
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002692/*
2693 * Generate a seed MAC address from the PF MAC Address using jhash.
2694 * MAC Address for VFs are assigned incrementally starting from the seed.
2695 * These addresses are programmed in the ASIC by the PF and the VF driver
2696 * queries for the MAC address during its probe.
2697 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	/* Seed MAC derived from the PF MAC; see comment above */
	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 program the MAC as a pmac entry; newer chips
		 * use the SET_MAC command on the VF's interface.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A per-VF failure is logged but does not stop the loop;
		 * only the last status is returned to the caller.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address.
		 * NOTE(review): mac[5] can wrap past 0xff for many VFs,
		 * clobbering mac[4]'s increment — confirm acceptable.
		 */
		mac[5] += 1;
	}
	return status;
}
2726
/* Re-read the MACs already programmed for each VF (used when VFs were
 * enabled by a previous driver load) and cache them in vf_cfg.
 *
 * Returns 0 on success or the first MAC-query failure status.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): return status and 'active' are ignored
		 * here; only the pmac_id side effect is used — confirm
		 * this is intentional.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2746
/* Tear down SR-IOV state: disable SR-IOV, destroy per-VF MACs and
 * interfaces, and free the vf_cfg array.
 *
 * If any VF is still assigned to a VM, the per-VF HW teardown is
 * skipped (the VM still owns the device), but the host-side vf_cfg
 * bookkeeping is still released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo whatever be_vf_eth_addr_config() programmed:
		 * pmac entry on BE2/BE3, SET_MAC(NULL) on newer chips.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2774
/* Undo be_setup(): stop the worker, clear VFs, delete MAC filters,
 * destroy the interface and all queues, then release MSI-X vectors.
 * Teardown order mirrors the reverse of the setup order.
 *
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2806
Sathya Perla4c876612013-02-03 20:30:11 +00002807static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002808{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302809 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002810 struct be_vf_cfg *vf_cfg;
2811 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002812 int status;
2813
Sathya Perla4c876612013-02-03 20:30:11 +00002814 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2815 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002816
Sathya Perla4c876612013-02-03 20:30:11 +00002817 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302818 if (!BE3_chip(adapter)) {
2819 status = be_cmd_get_profile_config(adapter, &res,
2820 vf + 1);
2821 if (!status)
2822 cap_flags = res.if_cap_flags;
2823 }
Sathya Perla4c876612013-02-03 20:30:11 +00002824
2825 /* If a FW profile exists, then cap_flags are updated */
2826 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2827 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2828 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2829 &vf_cfg->if_handle, vf + 1);
2830 if (status)
2831 goto err;
2832 }
2833err:
2834 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002835}
2836
Sathya Perla39f1d942012-05-08 19:41:24 +00002837static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002838{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002839 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002840 int vf;
2841
Sathya Perla39f1d942012-05-08 19:41:24 +00002842 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2843 GFP_KERNEL);
2844 if (!adapter->vf_cfg)
2845 return -ENOMEM;
2846
Sathya Perla11ac75e2011-12-13 00:58:50 +00002847 for_all_vfs(adapter, vf_cfg, vf) {
2848 vf_cfg->if_handle = -1;
2849 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002850 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002851 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002852}
2853
/* Full SR-IOV bring-up for all VFs.
 *
 * Two entry modes:
 *  - VFs already enabled (e.g. by a previous driver load): reuse the
 *    existing count, query existing IFACE ids and MACs from FW.
 *  - Fresh enable: create IFACEs and program fresh MACs, then call
 *    pci_enable_sriov() last.
 * In both modes each VF is then granted FILTMGMT privilege if missing,
 * given its link speed as tx_rate, its default VLAN, and enabled.
 *
 * Returns 0 on success; on failure tears down via be_vf_clear() and
 * returns the failing status.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already exist: honor the existing count and ignore
		 * any conflicting num_vfs module parameter.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* IFACEs already exist in FW; just fetch their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		/* Enable SR-IOV only after per-VF FW config succeeded */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2950
/* On BE2/BE3 FW does not suggest the supported limits; fill @res with
 * driver-known limits instead, based on chip type, PF/VF role, SR-IOV
 * intent and flex-NIC (multi-channel) mode.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	/* Only a BE3 PF can host VFs; cap at the PCI-reported total */
	if (BE3_chip(adapter) && be_physfn(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs && num_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* In Flex10 mode the VLAN table is shared 8 ways */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* Multiple TX queues only on a BE3 PF without SR-IOV/multi-channel */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RXQ */
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
2995
Sathya Perla30128032011-11-10 19:17:57 +00002996static void be_setup_init(struct be_adapter *adapter)
2997{
2998 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002999 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003000 adapter->if_handle = -1;
3001 adapter->be3_native = false;
3002 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003003 if (be_physfn(adapter))
3004 adapter->cmd_privileges = MAX_PRIVILEGES;
3005 else
3006 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003007}
3008
/* Populate adapter->res with this function's resource limits.
 *
 * BE2/BE3: limits are driver-derived (BEx_get_resources); for BE3 the
 * FW profile may still lower max_tx_qs. Lancer/Skyhawk: limits come
 * from the GET_FUNC_CONFIG / GET_PROFILE_CONFIG FW commands, with half
 * the EQs reserved when RoCE may be enabled.
 *
 * Returns 0 on success or a FW command failure status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For BE3 only check if FW suggests a different max-txqs value */
	if (BE3_chip(adapter)) {
		status = be_cmd_get_profile_config(adapter, &res, 0);
		if (!status && res.max_tx_qs)
			adapter->res.max_tx_qs =
				min(adapter->res.max_tx_qs, res.max_tx_qs);
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* max_vfs is a PF-pool limit; only a PF can query it */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3060
/* Routine to query per function resource limits.
 *
 * Queries the FW config (port number, function mode/caps, ASIC rev),
 * fills adapter->res via be_get_resources(), allocates the pmac_id
 * table, and clamps the configured queue count to the HW limits.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a FW status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3088
/* Establish the netdev MAC address and program it into the HW.
 *
 * If dev_addr is still all-zero (first setup) the permanent MAC is read
 * from FW and copied to dev_addr/perm_addr; otherwise the existing
 * dev_addr is re-programmed (the HW may have been reset).
 *
 * Returns 0 on success, or the FW status if the permanent-MAC query
 * fails. A pmac-add failure is deliberately ignored (see below).
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* On BE3 VFs this cmd may fail due to lack of privilege.
	 * Ignore the failure as in this case pmac_id is fetched
	 * in the IFACE_CREATE cmd.
	 */
	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id[0], 0);
	return 0;
}
3114
/* Main adapter initialization path (probe/resume/recovery).
 *
 * Order matters: FW config query -> MSI-X -> EQs -> TX/RX CQs -> MCC ->
 * privileges -> IFACE create -> MAC -> TX queues -> VLAN/RX-mode/flow
 * control -> optional SR-IOV -> PHY info -> start the worker.
 *
 * Returns 0 on success; on failure everything created so far is torn
 * down via be_clear() and the failing status is returned.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	/* Enable only the flags the HW capability set allows */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Re-program VLANs that survived a reset/recovery */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Sync HW flow-control settings with the driver's desired ones */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* SR-IOV failure is non-fatal: the PF keeps working */
	if (be_physfn(adapter) && num_vfs) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3207
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: with IRQs unavailable, manually signal every event
 * queue and schedule its NAPI context so pending completions are
 * processed (used by netconsole/kgdb-over-ethernet).
 *
 * Fix: drop the redundant bare 'return;' at the end of this void
 * function (no behavior change).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3223
Ajit Khaparde84517482009-09-04 03:12:16 +00003224#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003225static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003226
/* Decide whether the redboot section needs flashing.
 *
 * Compares the CRC stored at the end of the new image (last 4 bytes of
 * the section in @p) against the CRC read back from flash. Returns true
 * only when the CRCs differ (i.e. an update is needed); returns false
 * on CRC match or if the flash CRC cannot be read.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC lives in the last 4 bytes of the image section */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3253
Sathya Perla306f1342011-08-02 19:57:45 +00003254static bool phy_flashing_required(struct be_adapter *adapter)
3255{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003256 return (adapter->phy.phy_type == TN_8022 &&
3257 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003258}
3259
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003260static bool is_comp_in_ufi(struct be_adapter *adapter,
3261 struct flash_section_info *fsec, int type)
3262{
3263 int i = 0, img_type = 0;
3264 struct flash_section_info_g2 *fsec_g2 = NULL;
3265
Sathya Perlaca34fe32012-11-06 17:48:56 +00003266 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003267 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3268
3269 for (i = 0; i < MAX_FLASH_COMP; i++) {
3270 if (fsec_g2)
3271 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3272 else
3273 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3274
3275 if (img_type == type)
3276 return true;
3277 }
3278 return false;
3279
3280}
3281
Jingoo Han4188e7d2013-08-05 18:02:02 +09003282static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003283 int header_size,
3284 const struct firmware *fw)
3285{
3286 struct flash_section_info *fsec = NULL;
3287 const u8 *p = fw->data;
3288
3289 p += header_size;
3290 while (p < (fw->data + fw->size)) {
3291 fsec = (struct flash_section_info *)p;
3292 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3293 return fsec;
3294 p += 32;
3295 }
3296 return NULL;
3297}
3298
/* Write one firmware image component to flash in 32KB chunks.
 *
 * Intermediate chunks use the SAVE op; the final chunk uses the FLASH
 * op which commits the section. PHY firmware uses its own op codes, and
 * an ILLEGAL_IOCTL_REQ for PHY FW is treated as "not supported" rather
 * than an error.
 *
 * Returns 0 on success (or tolerated PHY-FW rejection), else the FW
 * write status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* FW command buffer carries at most 32KB per write */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			/* PHY FW not accepted by this FW: not fatal */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3339
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003340/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003341static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003342 const struct firmware *fw,
3343 struct be_dma_mem *flash_cmd,
3344 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003345
Ajit Khaparde84517482009-09-04 03:12:16 +00003346{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003347 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003348 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003349 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003350 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003351 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003352 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003353
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003354 struct flash_comp gen3_flash_types[] = {
3355 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3356 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3357 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3358 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3359 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3360 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3361 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3362 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3363 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3364 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3365 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3366 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3367 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3368 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3369 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3370 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3371 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3372 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3373 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3374 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003375 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003376
3377 struct flash_comp gen2_flash_types[] = {
3378 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3379 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3380 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3381 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3382 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3383 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3384 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3385 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3386 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3387 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3388 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3389 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3390 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3391 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3392 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3393 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003394 };
3395
Sathya Perlaca34fe32012-11-06 17:48:56 +00003396 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003397 pflashcomp = gen3_flash_types;
3398 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003399 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003400 } else {
3401 pflashcomp = gen2_flash_types;
3402 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003403 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003404 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003405
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003406 /* Get flash section info*/
3407 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3408 if (!fsec) {
3409 dev_err(&adapter->pdev->dev,
3410 "Invalid Cookie. UFI corrupted ?\n");
3411 return -1;
3412 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003413 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003414 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003415 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003416
3417 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3418 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3419 continue;
3420
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003421 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3422 !phy_flashing_required(adapter))
3423 continue;
3424
3425 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3426 redboot = be_flash_redboot(adapter, fw->data,
3427 pflashcomp[i].offset, pflashcomp[i].size,
3428 filehdr_size + img_hdrs_size);
3429 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003430 continue;
3431 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003432
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003433 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003434 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003435 if (p + pflashcomp[i].size > fw->data + fw->size)
3436 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003437
3438 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3439 pflashcomp[i].size);
3440 if (status) {
3441 dev_err(&adapter->pdev->dev,
3442 "Flashing section type %d failed.\n",
3443 pflashcomp[i].img_type);
3444 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003445 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003446 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003447 return 0;
3448}
3449
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003450static int be_flash_skyhawk(struct be_adapter *adapter,
3451 const struct firmware *fw,
3452 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003453{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003454 int status = 0, i, filehdr_size = 0;
3455 int img_offset, img_size, img_optype, redboot;
3456 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3457 const u8 *p = fw->data;
3458 struct flash_section_info *fsec = NULL;
3459
3460 filehdr_size = sizeof(struct flash_file_hdr_g3);
3461 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3462 if (!fsec) {
3463 dev_err(&adapter->pdev->dev,
3464 "Invalid Cookie. UFI corrupted ?\n");
3465 return -1;
3466 }
3467
3468 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3469 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3470 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3471
3472 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3473 case IMAGE_FIRMWARE_iSCSI:
3474 img_optype = OPTYPE_ISCSI_ACTIVE;
3475 break;
3476 case IMAGE_BOOT_CODE:
3477 img_optype = OPTYPE_REDBOOT;
3478 break;
3479 case IMAGE_OPTION_ROM_ISCSI:
3480 img_optype = OPTYPE_BIOS;
3481 break;
3482 case IMAGE_OPTION_ROM_PXE:
3483 img_optype = OPTYPE_PXE_BIOS;
3484 break;
3485 case IMAGE_OPTION_ROM_FCoE:
3486 img_optype = OPTYPE_FCOE_BIOS;
3487 break;
3488 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3489 img_optype = OPTYPE_ISCSI_BACKUP;
3490 break;
3491 case IMAGE_NCSI:
3492 img_optype = OPTYPE_NCSI_FW;
3493 break;
3494 default:
3495 continue;
3496 }
3497
3498 if (img_optype == OPTYPE_REDBOOT) {
3499 redboot = be_flash_redboot(adapter, fw->data,
3500 img_offset, img_size,
3501 filehdr_size + img_hdrs_size);
3502 if (!redboot)
3503 continue;
3504 }
3505
3506 p = fw->data;
3507 p += filehdr_size + img_offset + img_hdrs_size;
3508 if (p + img_size > fw->data + fw->size)
3509 return -1;
3510
3511 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3512 if (status) {
3513 dev_err(&adapter->pdev->dev,
3514 "Flashing section type %d failed.\n",
3515 fsec->fsec_entry[i].type);
3516 return status;
3517 }
3518 }
3519 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003520}
3521
/* Download a firmware image to a Lancer adapter.
 * The image is streamed to the FW's "/prg" object in 32KB chunks through
 * a DMA buffer, then committed with a zero-length write. Depending on the
 * FW's reported change_status, a FW reset may be issued to activate the
 * new image.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the write_object request plus one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by data_written, which the FW may
	 * report as less than the chunk size that was submitted.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		/* A zero-length write at the final offset commits the image */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image: FW reset if the FW can do it now,
	 * otherwise tell the user a reboot is required.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3617
Sathya Perlaca34fe32012-11-06 17:48:56 +00003618#define UFI_TYPE2 2
3619#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003620#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003621#define UFI_TYPE4 4
3622static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003623 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003624{
3625 if (fhdr == NULL)
3626 goto be_get_ufi_exit;
3627
Sathya Perlaca34fe32012-11-06 17:48:56 +00003628 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3629 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003630 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3631 if (fhdr->asic_type_rev == 0x10)
3632 return UFI_TYPE3R;
3633 else
3634 return UFI_TYPE3;
3635 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003636 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003637
3638be_get_ufi_exit:
3639 dev_err(&adapter->pdev->dev,
3640 "UFI and Interface are not compatible for flashing\n");
3641 return -1;
3642}
3643
/* Download a UFI firmware image to a BE2/BE3/BE3-R/Skyhawk adapter.
 * Determines the UFI flavor from the file header, then dispatches each
 * flashable image (imageid == 1) to the generation-specific flashing
 * routine. Legacy TYPE2 UFIs carry no per-image headers and are flashed
 * once after the loop.
 * Returns 0 on success, -ENOMEM, -1, or a flashing status on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every write_flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* Returns a UFI_TYPE* value, or -1 if image/adapter mismatch */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* Only imageid 1 entries are flashable images */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* TYPE2 (BE2) UFIs have no image headers; flash the whole image.
	 * An incompatible UFI (-1) is reported as a failure here.
	 */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3712
3713int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3714{
3715 const struct firmware *fw;
3716 int status;
3717
3718 if (!netif_running(adapter->netdev)) {
3719 dev_err(&adapter->pdev->dev,
3720 "Firmware load not allowed (interface is down)\n");
3721 return -1;
3722 }
3723
3724 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3725 if (status)
3726 goto fw_exit;
3727
3728 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3729
3730 if (lancer_chip(adapter))
3731 status = lancer_fw_download(adapter, fw);
3732 else
3733 status = be_fw_download(adapter, fw);
3734
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003735 if (!status)
3736 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3737 adapter->fw_on_flash);
3738
Ajit Khaparde84517482009-09-04 03:12:16 +00003739fw_exit:
3740 release_firmware(fw);
3741 return status;
3742}
3743
/* Network device operations exposed to the net core: open/close,
 * transmit, MAC/MTU/VLAN management, stats, and SR-IOV VF configuration.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3763
/* Initialize the net_device: advertise offload features, install the
 * netdev/ethtool ops, and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable HW offloads: scatter-gather, TSO, checksum
	 * offloads and VLAN tag insertion.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: all of the above plus VLAN RX
	 * stripping/filtering, which are not user-toggleable here.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3795
3796static void be_unmap_pci_bars(struct be_adapter *adapter)
3797{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003798 if (adapter->csr)
3799 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003800 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003801 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003802}
3803
/* BAR index holding the doorbell registers: BAR 0 on Lancer chips and
 * on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3811
3812static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003813{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003814 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003815 adapter->roce_db.size = 4096;
3816 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3817 db_bar(adapter));
3818 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3819 db_bar(adapter));
3820 }
Parav Pandit045508a2012-03-26 14:27:13 +00003821 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003822}
3823
3824static int be_map_pci_bars(struct be_adapter *adapter)
3825{
3826 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003827 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003828
Sathya Perlace66f782012-11-06 17:48:58 +00003829 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3830 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3831 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003832
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003833 if (BEx_chip(adapter) && be_physfn(adapter)) {
3834 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3835 if (adapter->csr == NULL)
3836 return -ENOMEM;
3837 }
3838
Sathya Perlace66f782012-11-06 17:48:58 +00003839 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003840 if (addr == NULL)
3841 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003842 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003843
3844 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003845 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003846
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003847pci_map_err:
3848 be_unmap_pci_bars(adapter);
3849 return -ENOMEM;
3850}
3851
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003852static void be_ctrl_cleanup(struct be_adapter *adapter)
3853{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003854 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003855
3856 be_unmap_pci_bars(adapter);
3857
3858 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003859 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3860 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003861
Sathya Perla5b8821b2011-08-02 19:57:44 +00003862 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003863 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003864 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3865 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003866}
3867
/* Initialize the adapter control path: read the SLI interface register,
 * map the PCI BARs, allocate the (16-byte aligned) mailbox and rx-filter
 * DMA buffers, and set up the mailbox/MCC locks.
 * Returns 0 on success; on failure, unwinds via goto-cleanup and returns
 * a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be placed
	 * on a 16-byte boundary (HW requirement).
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored on EEH / error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3926
3927static void be_stats_cleanup(struct be_adapter *adapter)
3928{
Sathya Perla3abcded2010-10-03 22:12:27 -07003929 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003930
3931 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003932 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3933 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003934}
3935
3936static int be_stats_init(struct be_adapter *adapter)
3937{
Sathya Perla3abcded2010-10-03 22:12:27 -07003938 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003939
Sathya Perlaca34fe32012-11-06 17:48:56 +00003940 if (lancer_chip(adapter))
3941 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3942 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003943 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003944 else
3945 /* BE3 and Skyhawk */
3946 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3947
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003948 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003949 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003950 if (cmd->va == NULL)
3951 return -1;
3952 return 0;
3953}
3954
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe — detach RoCE, mask interrupts, stop the recovery worker,
 * unregister the netdev, release queues/FW resources, then free PCI
 * resources and the netdev itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Must not race with the periodic recovery worker during teardown */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3986
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003987bool be_is_wol_supported(struct be_adapter *adapter)
3988{
3989 return ((adapter->wol_cap & BE_WOL_CAP) &&
3990 !be_is_wol_excluded(adapter)) ? true : false;
3991}
3992
/* Query the firmware's UART trace/log level via the extended FAT
 * capabilities command. Returns the level for the UART mode of module 0,
 * or 0 on Lancer chips, allocation failure, or command failure.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* Lancer FW does not support this query */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		/* nothing allocated yet, so skip the free below */
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the command response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Pick the dbg level configured for the UART trace mode */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004029
/* Gather the adapter's initial configuration during probe: controller
 * attributes, WoL capability, temperature-poll frequency, FW log level
 * (mapped to msg_enable), and the default queue count.
 * Returns 0 on success or the failing command's status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Enable HW-level driver messages only when the FW log level is
	 * at or below the default.
	 */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4059
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004060static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004061{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004062 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004063 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004064
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004065 status = lancer_test_and_set_rdy_state(adapter);
4066 if (status)
4067 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004068
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004069 if (netif_running(adapter->netdev))
4070 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004071
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004072 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004073
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004074 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004075
4076 status = be_setup(adapter);
4077 if (status)
4078 goto err;
4079
4080 if (netif_running(adapter->netdev)) {
4081 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004082 if (status)
4083 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004084 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004085
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004086 dev_err(dev, "Error recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004087 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004088err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004089 if (status == -EAGAIN)
4090 dev_err(dev, "Waiting for resource provisioning\n");
4091 else
4092 dev_err(dev, "Error recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004093
4094 return status;
4095}
4096
4097static void be_func_recovery_task(struct work_struct *work)
4098{
4099 struct be_adapter *adapter =
4100 container_of(work, struct be_adapter, func_recovery_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004101 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004102
4103 be_detect_error(adapter);
4104
4105 if (adapter->hw_error && lancer_chip(adapter)) {
4106
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004107 rtnl_lock();
4108 netif_device_detach(adapter->netdev);
4109 rtnl_unlock();
4110
4111 status = lancer_recover_func(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004112 if (!status)
4113 netif_device_attach(adapter->netdev);
4114 }
4115
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004116 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4117 * no need to attempt further recovery.
4118 */
4119 if (!status || status == -EAGAIN)
4120 schedule_delayed_work(&adapter->func_recovery_work,
4121 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004122}
4123
4124static void be_worker(struct work_struct *work)
4125{
4126 struct be_adapter *adapter =
4127 container_of(work, struct be_adapter, work.work);
4128 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004129 struct be_eq_obj *eqo;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004130 int i;
4131
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004132 /* when interrupts are not yet enabled, just reap any pending
4133 * mcc completions */
4134 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004135 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004136 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004137 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004138 goto reschedule;
4139 }
4140
4141 if (!adapter->stats_cmd_sent) {
4142 if (lancer_chip(adapter))
4143 lancer_cmd_get_pport_stats(adapter,
4144 &adapter->stats_cmd);
4145 else
4146 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4147 }
4148
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05304149 if (be_physfn(adapter) &&
4150 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004151 be_cmd_get_die_temperature(adapter);
4152
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004153 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004154 if (rxo->rx_post_starved) {
4155 rxo->rx_post_starved = false;
4156 be_post_rx_frags(rxo, GFP_KERNEL);
4157 }
4158 }
4159
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004160 for_all_evt_queues(adapter, eqo, i)
4161 be_eqd_update(adapter, eqo);
4162
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004163reschedule:
4164 adapter->work_counter++;
4165 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4166}
4167
Sathya Perla257a3fe2013-06-14 15:54:51 +05304168/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004169static bool be_reset_required(struct be_adapter *adapter)
4170{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304171 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004172}
4173
Sathya Perlad3791422012-09-28 04:39:44 +00004174static char *mc_name(struct be_adapter *adapter)
4175{
4176 if (adapter->function_mode & FLEX10_MODE)
4177 return "FLEX10";
4178 else if (adapter->function_mode & VNIC_MODE)
4179 return "vNIC";
4180 else if (adapter->function_mode & UMC_ENABLED)
4181 return "UMC";
4182 else
4183 return "";
4184}
4185
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4190
/* PCI probe: enable the device, allocate the netdev/adapter pair, set
 * up DMA masking and the control path, sync with firmware, optionally
 * FLR the function, then bring up queues and register the netdev and
 * RoCE device.  Errors unwind through the goto ladder in reverse order
 * of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* netdev private area holds the be_adapter */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: probe continues even if it can't be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required)
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* kick off the periodic error-recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4319
/* PM suspend: arm wake-on-LAN if enabled, stop the recovery worker,
 * close the interface and tear down resources, then put the device
 * into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* recovery worker must not run while the device is going down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4343
4344static int be_resume(struct pci_dev *pdev)
4345{
4346 int status = 0;
4347 struct be_adapter *adapter = pci_get_drvdata(pdev);
4348 struct net_device *netdev = adapter->netdev;
4349
4350 netif_device_detach(netdev);
4351
4352 status = pci_enable_device(pdev);
4353 if (status)
4354 return status;
4355
Yijing Wang1ca01512013-06-27 20:53:42 +08004356 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004357 pci_restore_state(pdev);
4358
Sathya Perla2243e2e2009-11-22 22:02:03 +00004359 /* tell fw we're ready to fire cmds */
4360 status = be_cmd_fw_init(adapter);
4361 if (status)
4362 return status;
4363
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004364 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004365 if (netif_running(netdev)) {
4366 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004367 be_open(netdev);
4368 rtnl_unlock();
4369 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004370
4371 schedule_delayed_work(&adapter->func_recovery_work,
4372 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004373 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004374
4375 if (adapter->wol)
4376 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004377
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004378 return 0;
4379}
4380
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata can be NULL if probe never completed */
	if (!adapter)
		return;

	/* stop both periodic workers before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* function-level reset stops any in-flight DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4400
/* EEH callback: a PCI channel error was detected.  Quiesce the adapter
 * exactly once (guarded by eeh_error), then tell the EEH core whether
 * to disconnect (permanent failure) or proceed to a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* quiesce only on the first error notification */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4439
/* EEH callback: slot has been reset.  Re-enable the device, restore
 * PCI state, and wait for firmware to report ready before letting the
 * EEH core call our resume handler.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear AER status and the driver's recorded error flags */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4466
/* EEH callback: traffic may resume.  Reset and re-initialize the
 * function, rebuild resources, re-open the interface if it was running
 * and restart the recovery worker.  Failures are only logged — the EEH
 * resume callback returns void.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4503
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4509
/* PCI driver registration record tying device IDs to the
 * probe/remove, power-management, shutdown and error-recovery hooks
 * defined above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4520
4521static int __init be_init_module(void)
4522{
Joe Perches8e95a202009-12-03 07:58:21 +00004523 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4524 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004525 printk(KERN_WARNING DRV_NAME
4526 " : Module param rx_frag_size must be 2048/4096/8192."
4527 " Using 2048\n");
4528 rx_frag_size = 2048;
4529 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004530
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004531 return pci_register_driver(&be_driver);
4532}
4533module_init(be_init_module);
4534
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);