blob: 50116f8ed576c0ae76d051e63d0d767c2df2fb49 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: index i names the HW block reported by bit i.
 * Strings are printed verbatim in UE (Unrecoverable Error) reports;
 * trailing spaces are part of the original table and must be preserved.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: index i names the HW block reported by bit i */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
/* ndo_set_mac_address handler: program a new MAC via FW PMAC commands.
 * The new MAC is added first, the old one deleted afterwards, and the FW is
 * then queried to decide whether the change actually took effect (a VF may
 * lack the privilege to change its MAC, in which case the PF-provisioned MAC
 * must match the request for this call to succeed).
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM when
 * the FW did not activate the requested MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	/* remember the currently-programmed PMAC so it can be deleted below */
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0-layout (BE2) HW stats from the FW response into the driver's
 * chip-independent drv_stats block, after converting from LE to CPU order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; combine */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are reported per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy the v1-layout (BE3/Skyhawk) HW stats from the FW response into the
 * driver's chip-independent drv_stats block, after LE-to-CPU conversion.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* v1 reports a single combined address-filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
423
/* Copy Lancer per-port (pport) stats from the FW response into the driver's
 * chip-independent drv_stats block, after LE-to-CPU conversion.
 * The *_lo fields are the low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer reports one FIFO-overflow counter; it feeds both the
	 * input-fifo and rxpp-fifo driver stats below.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filtered drops are reported separately; combine */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Jingoo Han4188e7d2013-08-05 18:02:02 +0900475static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and the
 * FW-derived error counters in drv_stats into @stats.
 * The per-queue 64-bit counters are read under u64_stats seqcount retry
 * loops so 32-bit hosts see consistent values.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
Sathya Perla3c8def92011-06-12 20:01:58 +0000594static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596{
Sathya Perla3c8def92011-06-12 20:01:58 +0000597 struct be_tx_stats *stats = tx_stats(txo);
598
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000605 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
/* Fill the header WRB that precedes the data WRBs of a TX request.
 * Encodes offload requests (LSO, L4 checksum), VLAN insertion, the WRB
 * count and the total payload length into the hdr bit-fields.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	/* Always let HW append the Ethernet CRC */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not take the separate lso6 bit */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Stack requested L4 checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
/* DMA-map the skb's linear data and page frags and fill one data WRB
 * per fragment (plus the header WRB and an optional dummy WRB) into the
 * TX queue. Returns the number of payload bytes queued, or 0 if a DMA
 * mapping failed — in which case all mappings made so far are undone
 * and the queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; unwind point on error */

	if (skb->len > skb->data_len) {
		/* Linear (headlen) part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length WRB to make the WRB count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything mapped so far and rewind the queue head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB is a single map */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert the VLAN tag(s) into the packet data itself instead of letting
 * HW do it (used for HW csum/lockup workarounds and QnQ double-tagging).
 * Returns the (possibly reallocated) skb, or NULL on failure.
 * NOTE(review): per their kernel API, skb_share_check() and
 * __vlan_put_tag() free the skb when they fail, so on a NULL return the
 * caller must NOT free the skb again.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* Use the port vid when the skb carries no tag of its own */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now inline in the frame data; clear the meta tag
		 * so HW does not insert it a second time.
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
815
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000816static bool be_ipv6_exthdr_check(struct sk_buff *skb)
817{
818 struct ethhdr *eh = (struct ethhdr *)skb->data;
819 u16 offset = ETH_HLEN;
820
821 if (eh->h_proto == htons(ETH_P_IPV6)) {
822 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
823
824 offset += sizeof(struct ipv6hdr);
825 if (ip6h->nexthdr != NEXTHDR_TCP &&
826 ip6h->nexthdr != NEXTHDR_UDP) {
827 struct ipv6_opt_hdr *ehdr =
828 (struct ipv6_opt_hdr *) (skb->data + offset);
829
830 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
831 if (ehdr->hdrlen == 0xff)
832 return true;
833 }
834 }
835 return false;
836}
837
838static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
839{
840 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
841}
842
Sathya Perlaee9c7992013-05-22 23:04:55 +0000843static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
844 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000845{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000846 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000847}
848
Sathya Perlaee9c7992013-05-22 23:04:55 +0000849static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
850 struct sk_buff *skb,
851 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000853 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000854 unsigned int eth_hdr_len;
855 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000856
Somnath Kotur48265662013-05-26 21:08:47 +0000857 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
858 * may cause a transmit stall on that port. So the work-around is to
859 * pad such packets to a 36-byte length.
860 */
861 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
862 if (skb_padto(skb, 36))
863 goto tx_drop;
864 skb->len = 36;
865 }
866
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000867 /* For padded packets, BE HW modifies tot_len field in IP header
868 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000869 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000870 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000871 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
872 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000873 if (skb->len <= 60 &&
874 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000875 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000876 ip = (struct iphdr *)ip_hdr(skb);
877 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
878 }
879
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000880 /* If vlan tag is already inlined in the packet, skip HW VLAN
881 * tagging in UMC mode
882 */
883 if ((adapter->function_mode & UMC_ENABLED) &&
884 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000885 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000886
Somnath Kotur93040ae2012-06-26 22:32:10 +0000887 /* HW has a bug wherein it will calculate CSUM for VLAN
888 * pkts even though it is disabled.
889 * Manually insert VLAN in pkt.
890 */
891 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000892 vlan_tx_tag_present(skb)) {
893 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000894 if (unlikely(!skb))
895 goto tx_drop;
896 }
897
898 /* HW may lockup when VLAN HW tagging is requested on
899 * certain ipv6 packets. Drop such pkts if the HW workaround to
900 * skip HW tagging is not enabled by FW.
901 */
902 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000903 (adapter->pvid || adapter->qnq_vid) &&
904 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000905 goto tx_drop;
906
907 /* Manual VLAN tag insertion to prevent:
908 * ASIC lockup when the ASIC inserts VLAN tag into
909 * certain ipv6 packets. Insert VLAN tags in driver,
910 * and set event, completion, vlan bits accordingly
911 * in the Tx WRB.
912 */
913 if (be_ipv6_tx_stall_chk(adapter, skb) &&
914 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000915 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000916 if (unlikely(!skb))
917 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000918 }
919
Sathya Perlaee9c7992013-05-22 23:04:55 +0000920 return skb;
921tx_drop:
922 dev_kfree_skb_any(skb);
923 return NULL;
924}
925
/* ndo_start_xmit handler: apply HW workarounds, build the WRBs for the
 * skb, ring the TX doorbell and update the SW TX stats.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	/* May drop or reallocate the skb; NULL means it was consumed */
	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: queue head was already rewound by
		 * make_tx_wrbs(); restore ours and drop the skb.
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
971
972static int be_change_mtu(struct net_device *netdev, int new_mtu)
973{
974 struct be_adapter *adapter = netdev_priv(netdev);
975 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000976 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
977 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700978 dev_info(&adapter->pdev->dev,
979 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000980 BE_MIN_MTU,
981 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700982 return -EINVAL;
983 }
984 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
985 netdev->mtu, new_mtu);
986 netdev->mtu = new_mtu;
987 return 0;
988}
989
990/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000991 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
992 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700993 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More vlans configured than HW filters available */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* Last arg (1) = vlan promiscuous */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
1029
Patrick McHardy80d5c362013-04-19 02:04:28 +00001030static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001031{
1032 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001033 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001034
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001035 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001036 status = -EINVAL;
1037 goto ret;
1038 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001039
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001040 /* Packets with VID 0 are always received by Lancer by default */
1041 if (lancer_chip(adapter) && vid == 0)
1042 goto ret;
1043
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001044 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301045 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001046 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001047
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001048 if (!status)
1049 adapter->vlans_added++;
1050 else
1051 adapter->vlan_tag[vid] = 0;
1052ret:
1053 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001054}
1055
Patrick McHardy80d5c362013-04-19 02:04:28 +00001056static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001057{
1058 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001059 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001060
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001061 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001062 status = -EINVAL;
1063 goto ret;
1064 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001065
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001066 /* Packets with VID 0 are always received by Lancer by default */
1067 if (lancer_chip(adapter) && vid == 0)
1068 goto ret;
1069
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001070 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301071 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001072 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001073
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001074 if (!status)
1075 adapter->vlans_added--;
1076 else
1077 adapter->vlan_tag[vid] = 1;
1078ret:
1079 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001080}
1081
/* ndo_set_rx_mode handler: sync the HW RX filters (promisc, multicast,
 * secondary unicast MACs) with the netdev state, falling back to
 * promisc/allmulti modes when the HW filter tables are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* be_vid_config() is a no-op while promiscuous, so re-run
		 * it now that promisc mode is off
		 */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many UC addresses for the HW table: go promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1143
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001144static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1145{
1146 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001147 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001148 int status;
1149
Sathya Perla11ac75e2011-12-13 00:58:50 +00001150 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001151 return -EPERM;
1152
Sathya Perla11ac75e2011-12-13 00:58:50 +00001153 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001154 return -EINVAL;
1155
Sathya Perla3175d8c2013-07-23 15:25:03 +05301156 if (BEx_chip(adapter)) {
1157 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1158 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001159
Sathya Perla11ac75e2011-12-13 00:58:50 +00001160 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1161 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301162 } else {
1163 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1164 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001165 }
1166
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001167 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001168 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1169 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001170 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001171 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001172
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001173 return status;
1174}
1175
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001176static int be_get_vf_config(struct net_device *netdev, int vf,
1177 struct ifla_vf_info *vi)
1178{
1179 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001180 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001181
Sathya Perla11ac75e2011-12-13 00:58:50 +00001182 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001183 return -EPERM;
1184
Sathya Perla11ac75e2011-12-13 00:58:50 +00001185 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001186 return -EINVAL;
1187
1188 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001189 vi->tx_rate = vf_cfg->tx_rate;
1190 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001191 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001192 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001193
1194 return 0;
1195}
1196
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001197static int be_set_vf_vlan(struct net_device *netdev,
1198 int vf, u16 vlan, u8 qos)
1199{
1200 struct be_adapter *adapter = netdev_priv(netdev);
1201 int status = 0;
1202
Sathya Perla11ac75e2011-12-13 00:58:50 +00001203 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001204 return -EPERM;
1205
Sathya Perla11ac75e2011-12-13 00:58:50 +00001206 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001207 return -EINVAL;
1208
1209 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001210 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1211 /* If this is new value, program it. Else skip. */
1212 adapter->vf_cfg[vf].vlan_tag = vlan;
1213
1214 status = be_cmd_set_hsw_config(adapter, vlan,
1215 vf + 1, adapter->vf_cfg[vf].if_handle);
1216 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001217 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001218 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001219 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001220 vlan = adapter->vf_cfg[vf].def_vid;
1221 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1222 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001223 }
1224
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001225
1226 if (status)
1227 dev_info(&adapter->pdev->dev,
1228 "VLAN %d config on VF %d failed\n", vlan, vf);
1229 return status;
1230}
1231
Ajit Khapardee1d18732010-07-23 01:52:13 +00001232static int be_set_vf_tx_rate(struct net_device *netdev,
1233 int vf, int rate)
1234{
1235 struct be_adapter *adapter = netdev_priv(netdev);
1236 int status = 0;
1237
Sathya Perla11ac75e2011-12-13 00:58:50 +00001238 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001239 return -EPERM;
1240
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001241 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001242 return -EINVAL;
1243
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001244 if (rate < 100 || rate > 10000) {
1245 dev_err(&adapter->pdev->dev,
1246 "tx rate must be between 100 and 10000 Mbps\n");
1247 return -EINVAL;
1248 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001249
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001250 if (lancer_chip(adapter))
1251 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1252 else
1253 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001254
1255 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001256 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001257 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001258 else
1259 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001260 return status;
1261}
1262
/* Adaptive interrupt coalescing: recompute and program the EQ delay
 * (eqd) for this event queue based on the RX packet rate observed over
 * the last second.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: use the statically configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit packet counter consistently */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps into an EQ delay, clamped to [min_eqd, max_eqd];
	 * delays below 10 are rounded down to 0
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Only issue the FW cmd when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1311
Sathya Perla3abcded2010-10-03 22:12:27 -07001312static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001313 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001314{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001315 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001316
Sathya Perlaab1594e2011-07-25 19:10:15 +00001317 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001318 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001319 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001320 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001321 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001322 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001323 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001324 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001325 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326}
1327
Sathya Perla2e588f82011-03-11 02:49:26 +00001328static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001329{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001330 /* L4 checksum is not reliable for non TCP/UDP packets.
1331 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001332 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1333 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001334}
1335
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001336static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1337 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001338{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001339 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001340 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001341 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001342
Sathya Perla3abcded2010-10-03 22:12:27 -07001343 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344 BUG_ON(!rx_page_info->page);
1345
Ajit Khaparde205859a2010-02-09 01:34:21 +00001346 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001347 dma_unmap_page(&adapter->pdev->dev,
1348 dma_unmap_addr(rx_page_info, bus),
1349 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001350 rx_page_info->last_page_user = false;
1351 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352
1353 atomic_dec(&rxq->used);
1354 return rx_page_info;
1355}
1356
1357/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001358static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360{
Sathya Perla3abcded2010-10-03 22:12:27 -07001361 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001363 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001365 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001367 put_page(page_info->page);
1368 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001369 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370 }
1371}
1372
1373/*
1374 * skb_fill_rx_data forms a complete skb for an ether frame
1375 * indicated by rxcp.
1376 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001377static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1378 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379{
Sathya Perla3abcded2010-10-03 22:12:27 -07001380 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001382 u16 i, j;
1383 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384 u8 *start;
1385
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001386 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387 start = page_address(page_info->page) + page_info->page_offset;
1388 prefetch(start);
1389
1390 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001391 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 skb->len = curr_frag_len;
1394 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001395 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 /* Complete packet has now been moved to data */
1397 put_page(page_info->page);
1398 skb->data_len = 0;
1399 skb->tail += curr_frag_len;
1400 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001401 hdr_len = ETH_HLEN;
1402 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001404 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001405 skb_shinfo(skb)->frags[0].page_offset =
1406 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001407 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001409 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410 skb->tail += hdr_len;
1411 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001412 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413
Sathya Perla2e588f82011-03-11 02:49:26 +00001414 if (rxcp->pkt_size <= rx_frag_size) {
1415 BUG_ON(rxcp->num_rcvd != 1);
1416 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417 }
1418
1419 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001420 index_inc(&rxcp->rxq_idx, rxq->len);
1421 remaining = rxcp->pkt_size - curr_frag_len;
1422 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001423 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001424 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001426 /* Coalesce all frags from the same physical page in one slot */
1427 if (page_info->page_offset == 0) {
1428 /* Fresh page */
1429 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001430 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001431 skb_shinfo(skb)->frags[j].page_offset =
1432 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001433 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001434 skb_shinfo(skb)->nr_frags++;
1435 } else {
1436 put_page(page_info->page);
1437 }
1438
Eric Dumazet9e903e02011-10-18 21:00:24 +00001439 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440 skb->len += curr_frag_len;
1441 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001442 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001443 remaining -= curr_frag_len;
1444 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001445 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001447 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448}
1449
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001450/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001451static void be_rx_compl_process(struct be_rx_obj *rxo,
1452 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001454 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001455 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001457
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001458 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001459 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001460 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001461 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462 return;
1463 }
1464
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001465 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001467 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001468 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001469 else
1470 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001472 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001473 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001474 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001475 skb->rxhash = rxcp->rss_hash;
1476
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001477
Jiri Pirko343e43c2011-08-25 02:50:51 +00001478 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001479 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001480
1481 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482}
1483
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001484/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001485static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1486 struct napi_struct *napi,
1487 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001489 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001491 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001492 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001493 u16 remaining, curr_frag_len;
1494 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001495
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001496 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001497 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001498 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001499 return;
1500 }
1501
Sathya Perla2e588f82011-03-11 02:49:26 +00001502 remaining = rxcp->pkt_size;
1503 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001504 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001505
1506 curr_frag_len = min(remaining, rx_frag_size);
1507
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001508 /* Coalesce all frags from the same physical page in one slot */
1509 if (i == 0 || page_info->page_offset == 0) {
1510 /* First frag or Fresh page */
1511 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001512 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001513 skb_shinfo(skb)->frags[j].page_offset =
1514 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001515 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001516 } else {
1517 put_page(page_info->page);
1518 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001519 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001520 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001522 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523 memset(page_info, 0, sizeof(*page_info));
1524 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001525 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001527 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001528 skb->len = rxcp->pkt_size;
1529 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001530 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001531 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001532 if (adapter->netdev->features & NETIF_F_RXHASH)
1533 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001534
Jiri Pirko343e43c2011-08-25 02:50:51 +00001535 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001536 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001537
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001538 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001539}
1540
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001541static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1542 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543{
Sathya Perla2e588f82011-03-11 02:49:26 +00001544 rxcp->pkt_size =
1545 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1546 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1547 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1548 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001549 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001550 rxcp->ip_csum =
1551 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1552 rxcp->l4_csum =
1553 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1554 rxcp->ipv6 =
1555 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1556 rxcp->rxq_idx =
1557 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1558 rxcp->num_rcvd =
1559 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1560 rxcp->pkt_type =
1561 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001562 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001563 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001564 if (rxcp->vlanf) {
1565 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001566 compl);
1567 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1568 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001569 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001570 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001571}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001573static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1574 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001575{
1576 rxcp->pkt_size =
1577 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1578 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1579 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1580 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001581 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001582 rxcp->ip_csum =
1583 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1584 rxcp->l4_csum =
1585 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1586 rxcp->ipv6 =
1587 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1588 rxcp->rxq_idx =
1589 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1590 rxcp->num_rcvd =
1591 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1592 rxcp->pkt_type =
1593 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001594 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001595 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001596 if (rxcp->vlanf) {
1597 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001598 compl);
1599 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1600 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001601 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001602 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001603 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1604 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001605}
1606
/* Fetch the next valid Rx completion from the ring's CQ, or NULL if none.
 * Parses it into the ring's single reusable rxcp struct, applies VLAN
 * quirk fixups, then clears the descriptor's valid bit and advances the
 * CQ tail so the entry can be reused by HW.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after seeing valid set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum is not reliable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the tag for the PVID when it is not configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1649
Eric Dumazet1829b082011-03-01 05:48:12 +00001650static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001652 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001653
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001655 gfp |= __GFP_COMP;
1656 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001657}
1658
1659/*
1660 * Allocate a page, split it to fragments of size rx_frag_size and post as
1661 * receive buffers to BE
1662 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001663static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001664{
Sathya Perla3abcded2010-10-03 22:12:27 -07001665 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001666 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001667 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001668 struct page *pagep = NULL;
1669 struct be_eth_rx_d *rxd;
1670 u64 page_dmaaddr = 0, frag_dmaaddr;
1671 u32 posted, page_offset = 0;
1672
Sathya Perla3abcded2010-10-03 22:12:27 -07001673 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1675 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001676 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001678 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679 break;
1680 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001681 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1682 0, adapter->big_page_size,
1683 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684 page_info->page_offset = 0;
1685 } else {
1686 get_page(pagep);
1687 page_info->page_offset = page_offset + rx_frag_size;
1688 }
1689 page_offset = page_info->page_offset;
1690 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001691 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1693
1694 rxd = queue_head_node(rxq);
1695 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1696 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697
1698 /* Any space left in the current big page for another frag? */
1699 if ((page_offset + rx_frag_size + rx_frag_size) >
1700 adapter->big_page_size) {
1701 pagep = NULL;
1702 page_info->last_page_user = true;
1703 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001704
1705 prev_page_info = page_info;
1706 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001707 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708 }
1709 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001710 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001711
1712 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001714 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001715 } else if (atomic_read(&rxq->used) == 0) {
1716 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001717 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719}
1720
Sathya Perla5fb379e2009-06-18 00:02:59 +00001721static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001723 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1724
1725 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1726 return NULL;
1727
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001728 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001729 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1730
1731 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1732
1733 queue_tail_inc(tx_cq);
1734 return txcp;
1735}
1736
Sathya Perla3c8def92011-06-12 20:01:58 +00001737static u16 be_tx_compl_process(struct be_adapter *adapter,
1738 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001739{
Sathya Perla3c8def92011-06-12 20:01:58 +00001740 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001741 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001742 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001744 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1745 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001746
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001747 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001748 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001749 sent_skbs[txq->tail] = NULL;
1750
1751 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001752 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001754 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001755 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001756 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001757 unmap_tx_frag(&adapter->pdev->dev, wrb,
1758 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001759 unmap_skb_hdr = false;
1760
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001761 num_wrbs++;
1762 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001763 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001765 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001766 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767}
1768
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001769/* Return the number of events in the event queue */
1770static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001771{
1772 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001773 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001774
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001775 do {
1776 eqe = queue_tail_node(&eqo->q);
1777 if (eqe->evt == 0)
1778 break;
1779
1780 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001781 eqe->evt = 0;
1782 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001783 queue_tail_inc(&eqo->q);
1784 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001785
1786 return num;
1787}
1788
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001789/* Leaves the EQ is disarmed state */
1790static void be_eq_clean(struct be_eq_obj *eqo)
1791{
1792 int num = events_get(eqo);
1793
1794 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1795}
1796
/* Quiesce an Rx ring: drain and discard all pending completions (waiting
 * for the HW flush completion where required), leave the CQ unarmed, then
 * free every posted-but-unused receive buffer and reset the ring indices.
 * Called only on teardown paths.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a dead adapter */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1847
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001848static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001850 struct be_tx_obj *txo;
1851 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001852 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001853 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001854 struct sk_buff *sent_skb;
1855 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001856 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857
Sathya Perlaa8e91792009-08-10 03:42:43 +00001858 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1859 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001860 pending_txqs = adapter->num_tx_qs;
1861
1862 for_all_tx_queues(adapter, txo, i) {
1863 txq = &txo->q;
1864 while ((txcp = be_tx_compl_get(&txo->cq))) {
1865 end_idx =
1866 AMAP_GET_BITS(struct amap_eth_tx_compl,
1867 wrb_index, txcp);
1868 num_wrbs += be_tx_compl_process(adapter, txo,
1869 end_idx);
1870 cmpl++;
1871 }
1872 if (cmpl) {
1873 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1874 atomic_sub(num_wrbs, &txq->used);
1875 cmpl = 0;
1876 num_wrbs = 0;
1877 }
1878 if (atomic_read(&txq->used) == 0)
1879 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001880 }
1881
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001882 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00001883 break;
1884
1885 mdelay(1);
1886 } while (true);
1887
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001888 for_all_tx_queues(adapter, txo, i) {
1889 txq = &txo->q;
1890 if (atomic_read(&txq->used))
1891 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1892 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001893
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001894 /* free posted tx for which compls will never arrive */
1895 while (atomic_read(&txq->used)) {
1896 sent_skb = txo->sent_skb_list[txq->tail];
1897 end_idx = txq->tail;
1898 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1899 &dummy_wrb);
1900 index_adv(&end_idx, num_wrbs - 1, txq->len);
1901 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1902 atomic_sub(num_wrbs, &txq->used);
1903 }
Sathya Perlab03388d2010-02-18 00:37:17 +00001904 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905}
1906
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001907static void be_evt_queues_destroy(struct be_adapter *adapter)
1908{
1909 struct be_eq_obj *eqo;
1910 int i;
1911
1912 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001913 if (eqo->q.created) {
1914 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001915 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05301916 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001917 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001918 be_queue_free(adapter, &eqo->q);
1919 }
1920}
1921
/* Allocate and create one event queue (with its NAPI context) per vector.
 * Returns 0 on success, or the first non-zero error without unwinding
 * queues already created (cleanup is a separate destroy path).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	/* One EQ per IRQ vector, capped by the configured queue count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		/* Host memory first, then the FW create command */
		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
1952
Sathya Perla5fb379e2009-06-18 00:02:59 +00001953static void be_mcc_queues_destroy(struct be_adapter *adapter)
1954{
1955 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001956
Sathya Perla8788fdc2009-07-27 22:52:03 +00001957 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001958 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001959 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001960 be_queue_free(adapter, q);
1961
Sathya Perla8788fdc2009-07-27 22:52:03 +00001962 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001963 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001964 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001965 be_queue_free(adapter, q);
1966}
1967
1968/* Must be called only after TX qs are created as MCC shares TX EQ */
1969static int be_mcc_queues_create(struct be_adapter *adapter)
1970{
1971 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001972
Sathya Perla8788fdc2009-07-27 22:52:03 +00001973 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001974 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001975 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001976 goto err;
1977
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001978 /* Use the default EQ for MCC completions */
1979 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001980 goto mcc_cq_free;
1981
Sathya Perla8788fdc2009-07-27 22:52:03 +00001982 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001983 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1984 goto mcc_cq_destroy;
1985
Sathya Perla8788fdc2009-07-27 22:52:03 +00001986 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001987 goto mcc_q_free;
1988
1989 return 0;
1990
1991mcc_q_free:
1992 be_queue_free(adapter, q);
1993mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001994 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001995mcc_cq_free:
1996 be_queue_free(adapter, cq);
1997err:
1998 return -1;
1999}
2000
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002001static void be_tx_queues_destroy(struct be_adapter *adapter)
2002{
2003 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002004 struct be_tx_obj *txo;
2005 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002006
Sathya Perla3c8def92011-06-12 20:01:58 +00002007 for_all_tx_queues(adapter, txo, i) {
2008 q = &txo->q;
2009 if (q->created)
2010 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2011 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002012
Sathya Perla3c8def92011-06-12 20:01:58 +00002013 q = &txo->cq;
2014 if (q->created)
2015 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2016 be_queue_free(adapter, q);
2017 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018}
2019
Sathya Perla77071332013-08-27 16:57:34 +05302020static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002022 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002023 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302024 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002025
Sathya Perla92bf14a2013-08-27 16:57:32 +05302026 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002027
Sathya Perla3c8def92011-06-12 20:01:58 +00002028 for_all_tx_queues(adapter, txo, i) {
2029 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002030 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2031 sizeof(struct be_eth_tx_compl));
2032 if (status)
2033 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002035 /* If num_evt_qs is less than num_tx_qs, then more than
2036 * one txq share an eq
2037 */
2038 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2039 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2040 if (status)
2041 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002042
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002043 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2044 sizeof(struct be_eth_wrb));
2045 if (status)
2046 return status;
2047
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002048 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002049 if (status)
2050 return status;
2051 }
2052
Sathya Perlad3791422012-09-28 04:39:44 +00002053 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2054 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002055 return 0;
2056}
2057
2058static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002059{
2060 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002061 struct be_rx_obj *rxo;
2062 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002063
Sathya Perla3abcded2010-10-03 22:12:27 -07002064 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002065 q = &rxo->cq;
2066 if (q->created)
2067 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2068 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002070}
2071
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002072static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002073{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002074 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002075 struct be_rx_obj *rxo;
2076 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002077
Sathya Perla92bf14a2013-08-27 16:57:32 +05302078 /* We can create as many RSS rings as there are EQs. */
2079 adapter->num_rx_qs = adapter->num_evt_qs;
2080
2081 /* We'll use RSS only if atleast 2 RSS rings are supported.
2082 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002083 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302084 if (adapter->num_rx_qs > 1)
2085 adapter->num_rx_qs++;
2086
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002087 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002088 for_all_rx_queues(adapter, rxo, i) {
2089 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002090 cq = &rxo->cq;
2091 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2092 sizeof(struct be_eth_rx_compl));
2093 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002094 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002095
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002096 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2097 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002098 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002099 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002100 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002101
Sathya Perlad3791422012-09-28 04:39:44 +00002102 dev_info(&adapter->pdev->dev,
2103 "created %d RSS queue(s) and 1 default RX queue\n",
2104 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002105 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002106}
2107
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002108static irqreturn_t be_intx(int irq, void *dev)
2109{
Sathya Perlae49cc342012-11-27 19:50:02 +00002110 struct be_eq_obj *eqo = dev;
2111 struct be_adapter *adapter = eqo->adapter;
2112 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002113
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002114 /* IRQ is not expected when NAPI is scheduled as the EQ
2115 * will not be armed.
2116 * But, this can happen on Lancer INTx where it takes
2117 * a while to de-assert INTx or in BE2 where occasionaly
2118 * an interrupt may be raised even when EQ is unarmed.
2119 * If NAPI is already scheduled, then counting & notifying
2120 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002121 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002122 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002123 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002124 __napi_schedule(&eqo->napi);
2125 if (num_evts)
2126 eqo->spurious_intr = 0;
2127 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002128 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002129
2130 /* Return IRQ_HANDLED only for the the first spurious intr
2131 * after a valid intr to stop the kernel from branding
2132 * this irq as a bad one!
2133 */
2134 if (num_evts || eqo->spurious_intr++ == 0)
2135 return IRQ_HANDLED;
2136 else
2137 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002138}
2139
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002140static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002141{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002142 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002143
Sathya Perla0b545a62012-11-23 00:27:18 +00002144 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2145 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002146 return IRQ_HANDLED;
2147}
2148
Sathya Perla2e588f82011-03-11 02:49:26 +00002149static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002150{
Somnath Koture38b1702013-05-29 22:55:56 +00002151 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002152}
2153
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002154static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2155 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002156{
Sathya Perla3abcded2010-10-03 22:12:27 -07002157 struct be_adapter *adapter = rxo->adapter;
2158 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002159 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002160 u32 work_done;
2161
2162 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002163 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002164 if (!rxcp)
2165 break;
2166
Sathya Perla12004ae2011-08-02 19:57:46 +00002167 /* Is it a flush compl that has no data */
2168 if (unlikely(rxcp->num_rcvd == 0))
2169 goto loop_continue;
2170
2171 /* Discard compl with partial DMA Lancer B0 */
2172 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002173 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002174 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002175 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002176
Sathya Perla12004ae2011-08-02 19:57:46 +00002177 /* On BE drop pkts that arrive due to imperfect filtering in
2178 * promiscuous mode on some skews
2179 */
2180 if (unlikely(rxcp->port != adapter->port_num &&
2181 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002182 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002183 goto loop_continue;
2184 }
2185
2186 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002187 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002188 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002189 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002190loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002191 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002192 }
2193
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002194 if (work_done) {
2195 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002196
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002197 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2198 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002199 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002200
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201 return work_done;
2202}
2203
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002204static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2205 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002206{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002208 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002209
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002210 for (work_done = 0; work_done < budget; work_done++) {
2211 txcp = be_tx_compl_get(&txo->cq);
2212 if (!txcp)
2213 break;
2214 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002215 AMAP_GET_BITS(struct amap_eth_tx_compl,
2216 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002217 }
2218
2219 if (work_done) {
2220 be_cq_notify(adapter, txo->cq.id, true, work_done);
2221 atomic_sub(num_wrbs, &txo->q.used);
2222
2223 /* As Tx wrbs have been freed up, wake up netdev queue
2224 * if it was stopped due to lack of tx wrbs. */
2225 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2226 atomic_read(&txo->q.used) < txo->q.len / 2) {
2227 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002228 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002229
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002230 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2231 tx_stats(txo)->tx_compl += work_done;
2232 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2233 }
2234 return (work_done < budget); /* Done */
2235}
Sathya Perla3c8def92011-06-12 20:01:58 +00002236
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302237int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002238{
2239 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2240 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002241 int max_work = 0, work, i, num_evts;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002242 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002243
Sathya Perla0b545a62012-11-23 00:27:18 +00002244 num_evts = events_get(eqo);
2245
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002246 /* Process all TXQs serviced by this EQ */
2247 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2248 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2249 eqo->tx_budget, i);
2250 if (!tx_done)
2251 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252 }
2253
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002254 /* This loop will iterate twice for EQ0 in which
2255 * completions of the last RXQ (default one) are also processed
2256 * For other EQs the loop iterates only once
2257 */
2258 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2259 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2260 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002261 }
2262
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002263 if (is_mcc_eqo(eqo))
2264 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002265
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002266 if (max_work < budget) {
2267 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002268 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002269 } else {
2270 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002271 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002272 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002273 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002274}
2275
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002276void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002277{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002278 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2279 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002280 u32 i;
2281
Sathya Perlad23e9462012-12-17 19:38:51 +00002282 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002283 return;
2284
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002285 if (lancer_chip(adapter)) {
2286 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2287 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2288 sliport_err1 = ioread32(adapter->db +
2289 SLIPORT_ERROR1_OFFSET);
2290 sliport_err2 = ioread32(adapter->db +
2291 SLIPORT_ERROR2_OFFSET);
2292 }
2293 } else {
2294 pci_read_config_dword(adapter->pdev,
2295 PCICFG_UE_STATUS_LOW, &ue_lo);
2296 pci_read_config_dword(adapter->pdev,
2297 PCICFG_UE_STATUS_HIGH, &ue_hi);
2298 pci_read_config_dword(adapter->pdev,
2299 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2300 pci_read_config_dword(adapter->pdev,
2301 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002302
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002303 ue_lo = (ue_lo & ~ue_lo_mask);
2304 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002305 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002306
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002307 /* On certain platforms BE hardware can indicate spurious UEs.
2308 * Allow the h/w to stop working completely in case of a real UE.
2309 * Hence not setting the hw_error for UE detection.
2310 */
2311 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002312 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002313 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002314 "Error detected in the card\n");
2315 }
2316
2317 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2318 dev_err(&adapter->pdev->dev,
2319 "ERR: sliport status 0x%x\n", sliport_status);
2320 dev_err(&adapter->pdev->dev,
2321 "ERR: sliport error1 0x%x\n", sliport_err1);
2322 dev_err(&adapter->pdev->dev,
2323 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002324 }
2325
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002326 if (ue_lo) {
2327 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2328 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002329 dev_err(&adapter->pdev->dev,
2330 "UE: %s bit set\n", ue_status_low_desc[i]);
2331 }
2332 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002333
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002334 if (ue_hi) {
2335 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2336 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002337 dev_err(&adapter->pdev->dev,
2338 "UE: %s bit set\n", ue_status_hi_desc[i]);
2339 }
2340 }
2341
2342}
2343
Sathya Perla8d56ff12009-11-22 22:02:26 +00002344static void be_msix_disable(struct be_adapter *adapter)
2345{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002346 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002347 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002348 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302349 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002350 }
2351}
2352
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002353static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002354{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302355 int i, status, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002356 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002357
Sathya Perla92bf14a2013-08-27 16:57:32 +05302358 /* If RoCE is supported, program the max number of NIC vectors that
2359 * may be configured via set-channels, along with vectors needed for
2360 * RoCe. Else, just program the number we'll use initially.
2361 */
2362 if (be_roce_supported(adapter))
2363 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2364 2 * num_online_cpus());
2365 else
2366 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002367
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002368 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002369 adapter->msix_entries[i].entry = i;
2370
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002371 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002372 if (status == 0) {
2373 goto done;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302374 } else if (status >= MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002375 num_vec = status;
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002376 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2377 num_vec);
2378 if (!status)
Sathya Perla3abcded2010-10-03 22:12:27 -07002379 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002380 }
Sathya Perlad3791422012-09-28 04:39:44 +00002381
2382 dev_warn(dev, "MSIx enable failed\n");
Sathya Perla92bf14a2013-08-27 16:57:32 +05302383
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002384 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2385 if (!be_physfn(adapter))
2386 return status;
2387 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002388done:
Sathya Perla92bf14a2013-08-27 16:57:32 +05302389 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2390 adapter->num_msix_roce_vec = num_vec / 2;
2391 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2392 adapter->num_msix_roce_vec);
2393 }
2394
2395 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2396
2397 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2398 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002399 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002400}
2401
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002402static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002403 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002404{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302405 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002406}
2407
/* Request one MSI-x IRQ per event queue.
 * On failure, frees every IRQ requested so far (walking backwards from
 * the failed index), disables MSI-x and returns the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* per-vector name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: i currently points at the EQ whose request failed */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2431
2432static int be_irq_register(struct be_adapter *adapter)
2433{
2434 struct net_device *netdev = adapter->netdev;
2435 int status;
2436
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002437 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002438 status = be_msix_register(adapter);
2439 if (status == 0)
2440 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002441 /* INTx is not supported for VF */
2442 if (!be_physfn(adapter))
2443 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002444 }
2445
Sathya Perlae49cc342012-11-27 19:50:02 +00002446 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002447 netdev->irq = adapter->pdev->irq;
2448 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002449 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002450 if (status) {
2451 dev_err(&adapter->pdev->dev,
2452 "INTx request IRQ failed - err %d\n", status);
2453 return status;
2454 }
2455done:
2456 adapter->isr_registered = true;
2457 return 0;
2458}
2459
2460static void be_irq_unregister(struct be_adapter *adapter)
2461{
2462 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002463 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002464 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002465
2466 if (!adapter->isr_registered)
2467 return;
2468
2469 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002470 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002471 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002472 goto done;
2473 }
2474
2475 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002476 for_all_evt_queues(adapter, eqo, i)
2477 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002478
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002479done:
2480 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002481}
2482
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002483static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002484{
2485 struct be_queue_info *q;
2486 struct be_rx_obj *rxo;
2487 int i;
2488
2489 for_all_rx_queues(adapter, rxo, i) {
2490 q = &rxo->q;
2491 if (q->created) {
2492 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002493 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002494 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002495 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002496 }
2497}
2498
Sathya Perla889cd4b2010-05-30 23:33:45 +00002499static int be_close(struct net_device *netdev)
2500{
2501 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002502 struct be_eq_obj *eqo;
2503 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002504
Parav Pandit045508a2012-03-26 14:27:13 +00002505 be_roce_dev_close(adapter);
2506
Somnath Kotur04d3d622013-05-02 03:36:55 +00002507 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2508 for_all_evt_queues(adapter, eqo, i)
2509 napi_disable(&eqo->napi);
2510 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2511 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002512
2513 be_async_mcc_disable(adapter);
2514
2515 /* Wait for all pending tx completions to arrive so that
2516 * all tx skbs are freed.
2517 */
Sathya Perlafba87552013-05-08 02:05:50 +00002518 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302519 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002520
2521 be_rx_qs_destroy(adapter);
2522
2523 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002524 if (msix_enabled(adapter))
2525 synchronize_irq(be_msix_vec_get(adapter, eqo));
2526 else
2527 synchronize_irq(netdev->irq);
2528 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002529 }
2530
Sathya Perla889cd4b2010-05-30 23:33:45 +00002531 be_irq_unregister(adapter);
2532
Sathya Perla482c9e72011-06-29 23:33:17 +00002533 return 0;
2534}
2535
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002536static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002537{
2538 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002539 int rc, i, j;
2540 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002541
2542 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002543 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2544 sizeof(struct be_eth_rx_d));
2545 if (rc)
2546 return rc;
2547 }
2548
2549 /* The FW would like the default RXQ to be created first */
2550 rxo = default_rxo(adapter);
2551 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2552 adapter->if_handle, false, &rxo->rss_id);
2553 if (rc)
2554 return rc;
2555
2556 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002557 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002558 rx_frag_size, adapter->if_handle,
2559 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002560 if (rc)
2561 return rc;
2562 }
2563
2564 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002565 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2566 for_all_rss_queues(adapter, rxo, i) {
2567 if ((j + i) >= 128)
2568 break;
2569 rsstable[j + i] = rxo->rss_id;
2570 }
2571 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002572 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2573 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2574
2575 if (!BEx_chip(adapter))
2576 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2577 RSS_ENABLE_UDP_IPV6;
2578
2579 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2580 128);
2581 if (rc) {
2582 adapter->rss_flags = 0;
Sathya Perla482c9e72011-06-29 23:33:17 +00002583 return rc;
Suresh Reddy594ad542013-04-25 23:03:20 +00002584 }
Sathya Perla482c9e72011-06-29 23:33:17 +00002585 }
2586
2587 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002588 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002589 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002590 return 0;
2591}
2592
/* ndo_open entry point: bring up RX queues, IRQs, NAPI and the link.
 * On any failure, be_close() undoes whatever was partially enabled.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Re-arm all RX and TX completion queues before enabling NAPI */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	/* Flag lets be_close() know NAPI must be disabled on teardown */
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2635
/* Enable or disable Wake-on-LAN (magic packet) in the FW.
 * @enable: true to arm WoL with the netdev's MAC, false to disarm
 *          (a zeroed MAC is passed to the FW when disabling).
 * Returns 0 on success, -1 on DMA alloc failure, or a FW/PCI error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL | __GFP_ZERO);
	if (cmd.va == NULL)
		return -1;

	if (enable) {
		/* Arm PM state in PCI config space before telling FW */
		status = pci_write_config_dword(adapter->pdev,
				PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
					adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* Zeroed MAC disables magic-packet wake in FW */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2673
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last per-VF command; a failure for one VF is
 * logged but does not stop the loop.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 program a PMAC entry; newer chips use SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets seed MAC with the last octet incremented */
		mac[5] += 1;
	}
	return status;
}
2708
/* Re-learn the MACs already programmed for each VF (used when VFs were
 * enabled before this driver loaded, e.g. across a PF re-probe).
 * Returns 0 on success or the first MAC-query error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Best-effort: fetches pmac_id; its status is deliberately
		 * ignored as the authoritative MAC comes from the query below.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2728
/* Tear down SR-IOV state: disable SR-IOV, delete per-VF MACs and
 * interfaces, and free the vf_cfg array. If VFs are still assigned to
 * guest VMs, only the book-keeping is freed and SR-IOV stays enabled.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal is chip-specific, mirroring how it was added */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	/* num_vfs = 0 makes subsequent for_all_vfs loops no-ops */
	adapter->num_vfs = 0;
}
2756
/* Destroy all queues in the reverse order of their creation in
 * be_setup_queues(): MCC, RX CQs, TX queues, then event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2764
/* Synchronously cancel the periodic worker. The WORKER_SCHEDULED flag
 * guards against cancelling delayed work that was never armed.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
2772
/* Full teardown of adapter state created by be_setup(): worker, VFs,
 * unicast MACs, the interface, all queues, pmac_id array and MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2798
Sathya Perla4c876612013-02-03 20:30:11 +00002799static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002800{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302801 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002802 struct be_vf_cfg *vf_cfg;
2803 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002804 int status;
2805
Sathya Perla4c876612013-02-03 20:30:11 +00002806 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2807 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002808
Sathya Perla4c876612013-02-03 20:30:11 +00002809 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302810 if (!BE3_chip(adapter)) {
2811 status = be_cmd_get_profile_config(adapter, &res,
2812 vf + 1);
2813 if (!status)
2814 cap_flags = res.if_cap_flags;
2815 }
Sathya Perla4c876612013-02-03 20:30:11 +00002816
2817 /* If a FW profile exists, then cap_flags are updated */
2818 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2819 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2820 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2821 &vf_cfg->if_handle, vf + 1);
2822 if (status)
2823 goto err;
2824 }
2825err:
2826 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002827}
2828
Sathya Perla39f1d942012-05-08 19:41:24 +00002829static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002830{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002831 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002832 int vf;
2833
Sathya Perla39f1d942012-05-08 19:41:24 +00002834 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2835 GFP_KERNEL);
2836 if (!adapter->vf_cfg)
2837 return -ENOMEM;
2838
Sathya Perla11ac75e2011-12-13 00:58:50 +00002839 for_all_vfs(adapter, vf_cfg, vf) {
2840 vf_cfg->if_handle = -1;
2841 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002842 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002843 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002844}
2845
/* Configure SR-IOV: decide the VF count, create or re-discover per-VF
 * interfaces and MACs, grant privileges, read link/vlan state, and
 * finally enable SR-IOV in PCI config space (for a fresh setup).
 * "old_vfs != 0" means VFs were already enabled (e.g. by a previous
 * driver instance), so existing FW state is queried instead of created.
 * On error all VF state is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		/* Clamp the module-param request to the HW limit */
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Interfaces: re-discover existing ones, or create fresh ones */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* MACs: re-query existing ones, or generate and program new ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV in PCI config space last, after FW state is ready */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2942
/* On BE2/BE3 FW does not suggest the supported limits, so fill @res
 * from compile-time constants, adjusted for function type (PF/VF),
 * SR-IOV intent, Flex10 mode and RSS capability.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	if (BE3_chip(adapter) && be_physfn(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		/* SR-IOV is "in use" only if HW supports it AND it was
		 * requested via the num_vfs module parameter.
		 */
		use_sriov = res->max_vfs && num_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* Flex10 partitioning shares the VLAN table among 8 functions */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* Multiple TX queues only on a native BE3 PF without SR-IOV/MC */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS set: the default (non-RSS) RXQ */
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
2987
Sathya Perla30128032011-11-10 19:17:57 +00002988static void be_setup_init(struct be_adapter *adapter)
2989{
2990 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002991 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002992 adapter->if_handle = -1;
2993 adapter->be3_native = false;
2994 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002995 if (be_physfn(adapter))
2996 adapter->cmd_privileges = MAX_PRIVILEGES;
2997 else
2998 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002999}
3000
/* Populate adapter->res with per-function resource limits.
 * BE2/BE3: derived locally via BEx_get_resources(); BE3 may further cap
 * max_tx_qs from the FW profile. Lancer/Skyhawk: read from FW via
 * GET_FUNC_CONFIG / GET_PROFILE_CONFIG.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For BE3 only check if FW suggests a different max-txqs value */
	if (BE3_chip(adapter)) {
		status = be_cmd_get_profile_config(adapter, &res, 0);
		if (!status && res.max_tx_qs)
			adapter->res.max_tx_qs =
				min(adapter->res.max_tx_qs, res.max_tx_qs);
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the PF can query the profile for the VF pool size */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3052
/* Routine to query per function resource limits.
 * Also allocates the pmac_id array (one slot per supported unicast MAC
 * plus one for the primary MAC) and clamps cfg_num_qs to the HW limit.
 * Returns 0, a FW error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3080
Sathya Perla95046b92013-07-23 15:25:02 +05303081static int be_mac_setup(struct be_adapter *adapter)
3082{
3083 u8 mac[ETH_ALEN];
3084 int status;
3085
3086 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3087 status = be_cmd_get_perm_mac(adapter, mac);
3088 if (status)
3089 return status;
3090
3091 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3092 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3093 } else {
3094 /* Maybe the HW was reset; dev_addr must be re-programmed */
3095 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3096 }
3097
3098 /* On BE3 VFs this cmd may fail due to lack of privilege.
3099 * Ignore the failure as in this case pmac_id is fetched
3100 * in the IFACE_CREATE cmd.
3101 */
3102 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3103 &adapter->pmac_id[0], 0);
3104 return 0;
3105}
3106
/* Arm the 1-second periodic worker and record that it is scheduled so
 * be_cancel_worker() knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3112
/* Create all queues (EQs, TXQs, RX CQs, MCC) and publish the real
 * queue counts to the net stack. On any failure only an error is
 * logged here; the caller is responsible for teardown.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* NOTE(review): callers hold rtnl_lock() around this function, as
	 * required by netif_set_real_num_*_queues() — see be_setup().
	 */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3147
/* Re-create all queues with the current configuration: close the
 * netdev if running, quiesce the worker, destroy and rebuild the
 * queues (re-programming MSI-X only if RoCE is not sharing vectors),
 * then restart the worker and re-open the netdev.
 * Returns 0 on success or the first setup/open error.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3183
/* Main adapter bring-up: query FW config, enable MSI-X, create the
 * interface and all queues, program MAC/VLAN/flow-control, set up
 * SR-IOV if requested, and start the periodic worker.
 * On failure everything is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags this function is actually capable of */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Only push flow-control settings if they differ from FW's */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3261
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: with IRQs unavailable (e.g. netconsole), kick
 * every event queue and schedule its NAPI context by hand.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant trailing "return;" removed (void function) */
}
#endif
3277
/* Signature string at the start of a firmware (UFI) file header */
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Flash-directory cookie, stored as two 16-byte halves. The second
 * literal fills all 16 bytes (no NUL terminator) — it is only ever
 * compared with memcmp() over sizeof(flash_cookie), never used as a
 * C string (see get_fsec_info()).
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003280
/* Decide whether the redboot image needs flashing by comparing the
 * CRC stored in flash with the last 4 bytes of the new image.
 * @p: start of the FW file; the image's trailing CRC is located at
 *     hdr_size + img_start + image_size - 4.
 * Returns true only when the CRCs differ (i.e. flashing is required);
 * false when they match or the flash CRC cannot be read.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3307
Sathya Perla306f1342011-08-02 19:57:45 +00003308static bool phy_flashing_required(struct be_adapter *adapter)
3309{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003310 return (adapter->phy.phy_type == TN_8022 &&
3311 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003312}
3313
/* Scan a flash section's entry table for a component of @type.
 * BE2 uses the older gen-2 layout (flash_section_info_g2), which
 * shares the same memory but has differently laid-out entries.
 * Returns true if any of the MAX_FLASH_COMP entries matches @type.
 */
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}
3335
/* Locate the flash section info inside a FW image by scanning past the
 * header for the flash_cookie marker, advancing 32 bytes at a time.
 * @adapter is unused here (kept for signature symmetry with callers).
 * Returns a pointer into fw->data, or NULL if no cookie is found.
 */
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		/* Compares all 32 cookie bytes (both 16-byte halves) */
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
3352
/* Write one firmware image to the adapter's flash ROM, 32KB at a time.
 * Each chunk is copied into the pre-allocated flash_cmd DMA buffer and
 * sent to the FW; all chunks but the last use a SAVE opcode, and the
 * final chunk uses a FLASH opcode, which commits the accumulated image.
 * PHY firmware uses its own SAVE/FLASH opcode pair.
 * Returns 0 on success or the FW command status on failure.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk: issue the commit (FLASH) opcode; earlier
		 * chunks are only saved on the adapter side.
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			/* NOTE(review): an ILLEGAL_IOCTL_REQ for PHY FW is
			 * treated as non-fatal (returns 0) — presumably the
			 * FW/board has no flashable PHY; confirm with FW spec.
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3393
/* For BE2, BE3 and BE3-R */
/* Flash every known component present in a BEx-style UFI image.
 * A fixed table of component descriptors (offset in the UFI, flash
 * optype, max size, image type) is chosen per chip generation — gen3
 * for BE3, gen2 otherwise — and each component that the UFI's flash
 * section actually contains is flashed via be_flash().
 * Returns 0 on success, -1 on a corrupted/truncated UFI, or the
 * failing FW command status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip table entries the UFI does not actually contain */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NOTE(review): NCSI FW is skipped when the running FW
		 * version string compares below "3.102.148.0" — presumably
		 * the minimum FW that supports NCSI flashing; confirm.
		 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Boot code is flashed only when its CRC differs from
		 * what is already in flash (checked by be_flash_redboot).
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject components that would read past the firmware file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3503
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003504static int be_flash_skyhawk(struct be_adapter *adapter,
3505 const struct firmware *fw,
3506 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003507{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003508 int status = 0, i, filehdr_size = 0;
3509 int img_offset, img_size, img_optype, redboot;
3510 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3511 const u8 *p = fw->data;
3512 struct flash_section_info *fsec = NULL;
3513
3514 filehdr_size = sizeof(struct flash_file_hdr_g3);
3515 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3516 if (!fsec) {
3517 dev_err(&adapter->pdev->dev,
3518 "Invalid Cookie. UFI corrupted ?\n");
3519 return -1;
3520 }
3521
3522 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3523 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3524 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3525
3526 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3527 case IMAGE_FIRMWARE_iSCSI:
3528 img_optype = OPTYPE_ISCSI_ACTIVE;
3529 break;
3530 case IMAGE_BOOT_CODE:
3531 img_optype = OPTYPE_REDBOOT;
3532 break;
3533 case IMAGE_OPTION_ROM_ISCSI:
3534 img_optype = OPTYPE_BIOS;
3535 break;
3536 case IMAGE_OPTION_ROM_PXE:
3537 img_optype = OPTYPE_PXE_BIOS;
3538 break;
3539 case IMAGE_OPTION_ROM_FCoE:
3540 img_optype = OPTYPE_FCOE_BIOS;
3541 break;
3542 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3543 img_optype = OPTYPE_ISCSI_BACKUP;
3544 break;
3545 case IMAGE_NCSI:
3546 img_optype = OPTYPE_NCSI_FW;
3547 break;
3548 default:
3549 continue;
3550 }
3551
3552 if (img_optype == OPTYPE_REDBOOT) {
3553 redboot = be_flash_redboot(adapter, fw->data,
3554 img_offset, img_size,
3555 filehdr_size + img_hdrs_size);
3556 if (!redboot)
3557 continue;
3558 }
3559
3560 p = fw->data;
3561 p += filehdr_size + img_offset + img_hdrs_size;
3562 if (p + img_size > fw->data + fw->size)
3563 return -1;
3564
3565 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3566 if (status) {
3567 dev_err(&adapter->pdev->dev,
3568 "Flashing section type %d failed.\n",
3569 fsec->fsec_entry[i].type);
3570 return status;
3571 }
3572 }
3573 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003574}
3575
/* Download a firmware image to a Lancer adapter.
 * The image is streamed to the FW's "/prg" object in 32KB chunks via
 * WRITE_OBJECT commands, then committed with a zero-length write.
 * Depending on the FW's reported change_status, either a FW reset is
 * issued immediately or the user is told a reboot is required.
 * Returns 0 on success or a negative/FW error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW reports how much it consumed; advance by that amount */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* Activate the new FW by resetting the physical device */
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3671
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
/* Classify a UFI image by matching its build-generation character
 * against the adapter's chip family: '4' on Skyhawk, '3' on BE3
 * (asic_type_rev 0x10 distinguishes BE3-R from plain BE3), '2' on BE2.
 * Returns one of the UFI_TYPE* constants, or -1 (with an error log)
 * when the image and adapter are incompatible.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
3697
/* Flash a non-Lancer UFI image onto the adapter.
 * Allocates one DMA buffer reused for all flashrom commands, classifies
 * the UFI via be_get_ufi_type(), then dispatches to the generation-
 * specific flashing routine for each image header with imageid == 1.
 * UFI_TYPE2 images carry no image headers and are flashed directly.
 * Returns 0 on success or a negative error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Legacy gen2 UFIs have no image-header list; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3766
3767int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3768{
3769 const struct firmware *fw;
3770 int status;
3771
3772 if (!netif_running(adapter->netdev)) {
3773 dev_err(&adapter->pdev->dev,
3774 "Firmware load not allowed (interface is down)\n");
3775 return -1;
3776 }
3777
3778 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3779 if (status)
3780 goto fw_exit;
3781
3782 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3783
3784 if (lancer_chip(adapter))
3785 status = lancer_fw_download(adapter, fw);
3786 else
3787 status = be_fw_download(adapter, fw);
3788
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003789 if (!status)
3790 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3791 adapter->fw_on_flash);
3792
Ajit Khaparde84517482009-09-04 03:12:16 +00003793fw_exit:
3794 release_firmware(fw);
3795 return status;
3796}
3797
/* Callbacks the network stack uses to drive this device (open/close,
 * transmit, MAC/VLAN/MTU configuration, stats, and SR-IOV VF control).
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3817
/* Initialize netdev feature flags, GSO limit, and the ops/ethtool
 * vectors for a newly allocated net_device.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads: scatter-gather, TSO, checksum
	 * offloads, RX checksum, and VLAN tag insertion.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN stripping/filtering are always on (not user-toggleable) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
3844
3845static void be_unmap_pci_bars(struct be_adapter *adapter)
3846{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003847 if (adapter->csr)
3848 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003849 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003850 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003851}
3852
/* Doorbell BAR number: BAR 0 on Lancer chips and on virtual functions,
 * BAR 4 on all other physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3860
3861static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003862{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003863 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003864 adapter->roce_db.size = 4096;
3865 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3866 db_bar(adapter));
3867 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3868 db_bar(adapter));
3869 }
Parav Pandit045508a2012-03-26 14:27:13 +00003870 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003871}
3872
/* Map the PCI BARs needed by the driver: the CSR BAR (BEx physical
 * functions only) and the doorbell BAR, and record the RoCE doorbell
 * region. Also latches the interface type from the SLI_INTF register.
 * Returns 0 on success or -ENOMEM, unmapping anything already mapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* Only BE2/BE3 PFs expose the CSR region (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3900
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003901static void be_ctrl_cleanup(struct be_adapter *adapter)
3902{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003903 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003904
3905 be_unmap_pci_bars(adapter);
3906
3907 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003908 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3909 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003910
Sathya Perla5b8821b2011-08-02 19:57:44 +00003911 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003912 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003913 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3914 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003915}
3916
/* One-time control-path setup: latch SLI family/VF info, map PCI BARs,
 * allocate the FW mailbox and RX-filter DMA buffers, and initialize
 * the locks/completions that serialize FW commands.
 * Returns 0 on success or a negative error code, releasing everything
 * acquired so far via the goto cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox proper can be placed
	 * at a 16-byte-aligned address within the buffer.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored on error recovery paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3975
3976static void be_stats_cleanup(struct be_adapter *adapter)
3977{
Sathya Perla3abcded2010-10-03 22:12:27 -07003978 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003979
3980 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003981 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3982 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003983}
3984
3985static int be_stats_init(struct be_adapter *adapter)
3986{
Sathya Perla3abcded2010-10-03 22:12:27 -07003987 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003988
Sathya Perlaca34fe32012-11-06 17:48:56 +00003989 if (lancer_chip(adapter))
3990 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3991 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003992 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003993 else
3994 /* BE3 and Skyhawk */
3995 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3996
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003997 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003998 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003999 if (cmd->va == NULL)
4000 return -1;
4001 return 0;
4002}
4003
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe — stop recovery work, unregister the netdev, release rings and
 * FW resources, free DMA buffers and BAR mappings, then release the
 * PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Ensure no recovery work runs concurrently with teardown */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4035
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004036bool be_is_wol_supported(struct be_adapter *adapter)
4037{
4038 return ((adapter->wol_cap & BE_WOL_CAP) &&
4039 !be_is_wol_excluded(adapter)) ? true : false;
4040}
4041
/* Query the FW's extended-FAT capabilities and extract the debug level
 * configured for the UART trace mode of module 0.
 * Returns that level, or 0 on Lancer chips, allocation failure, or
 * command failure (0 is the "no logging" default).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* NOTE(review): FAT capability query is skipped on Lancer —
	 * presumably unsupported there; confirm against FW spec.
	 */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* The config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004078
Sathya Perla39f1d942012-05-08 19:41:24 +00004079static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004080{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004081 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00004082 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004083
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004084 status = be_cmd_get_cntl_attributes(adapter);
4085 if (status)
4086 return status;
4087
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004088 status = be_cmd_get_acpi_wol_cap(adapter);
4089 if (status) {
4090 /* in case of a failure to get wol capabillities
4091 * check the exclusion list to determine WOL capability */
4092 if (!be_is_wol_excluded(adapter))
4093 adapter->wol_cap |= BE_WOL_CAP;
4094 }
4095
4096 if (be_is_wol_supported(adapter))
4097 adapter->wol = true;
4098
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004099 /* Must be a power of 2 or else MODULO will BUG_ON */
4100 adapter->be_get_temp_freq = 64;
4101
Somnath Kotur941a77d2012-05-17 22:59:03 +00004102 level = be_get_fw_log_level(adapter);
4103 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4104
Sathya Perla92bf14a2013-08-27 16:57:32 +05304105 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004106 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004107}
4108
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004109static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004110{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004111 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004112 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004113
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004114 status = lancer_test_and_set_rdy_state(adapter);
4115 if (status)
4116 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004117
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004118 if (netif_running(adapter->netdev))
4119 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004120
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004121 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004122
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004123 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004124
4125 status = be_setup(adapter);
4126 if (status)
4127 goto err;
4128
4129 if (netif_running(adapter->netdev)) {
4130 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004131 if (status)
4132 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004133 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004134
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004135 dev_err(dev, "Error recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004136 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004137err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004138 if (status == -EAGAIN)
4139 dev_err(dev, "Waiting for resource provisioning\n");
4140 else
4141 dev_err(dev, "Error recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004142
4143 return status;
4144}
4145
4146static void be_func_recovery_task(struct work_struct *work)
4147{
4148 struct be_adapter *adapter =
4149 container_of(work, struct be_adapter, func_recovery_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004150 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004151
4152 be_detect_error(adapter);
4153
4154 if (adapter->hw_error && lancer_chip(adapter)) {
4155
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004156 rtnl_lock();
4157 netif_device_detach(adapter->netdev);
4158 rtnl_unlock();
4159
4160 status = lancer_recover_func(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004161 if (!status)
4162 netif_device_attach(adapter->netdev);
4163 }
4164
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004165 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4166 * no need to attempt further recovery.
4167 */
4168 if (!status || status == -EAGAIN)
4169 schedule_delayed_work(&adapter->func_recovery_work,
4170 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004171}
4172
/* Periodic (1s) housekeeping worker: reaps MCC completions, kicks off
 * stats and die-temperature queries, replenishes starved RX rings and
 * updates adaptive EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a fresh stats query only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF-only: poll die temperature every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Repost buffers on RX queues that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4216
Sathya Perla257a3fe2013-06-14 15:54:51 +05304217/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004218static bool be_reset_required(struct be_adapter *adapter)
4219{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304220 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004221}
4222
Sathya Perlad3791422012-09-28 04:39:44 +00004223static char *mc_name(struct be_adapter *adapter)
4224{
4225 if (adapter->function_mode & FLEX10_MODE)
4226 return "FLEX10";
4227 else if (adapter->function_mode & VNIC_MODE)
4228 return "vNIC";
4229 else if (adapter->function_mode & UMC_ENABLED)
4230 return "UMC";
4231 else
4232 return "";
4233}
4234
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4239
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004240static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004241{
4242 int status = 0;
4243 struct be_adapter *adapter;
4244 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004245 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004246
4247 status = pci_enable_device(pdev);
4248 if (status)
4249 goto do_none;
4250
4251 status = pci_request_regions(pdev, DRV_NAME);
4252 if (status)
4253 goto disable_dev;
4254 pci_set_master(pdev);
4255
Sathya Perla7f640062012-06-05 19:37:20 +00004256 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004257 if (netdev == NULL) {
4258 status = -ENOMEM;
4259 goto rel_reg;
4260 }
4261 adapter = netdev_priv(netdev);
4262 adapter->pdev = pdev;
4263 pci_set_drvdata(pdev, adapter);
4264 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004265 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004266
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004267 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004268 if (!status) {
Craig Hada2bd92cd2013-04-21 23:28:18 +00004269 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4270 if (status < 0) {
4271 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4272 goto free_netdev;
4273 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004274 netdev->features |= NETIF_F_HIGHDMA;
4275 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004276 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Somnath Kotur0c5fed02013-06-11 17:18:22 +05304277 if (!status)
4278 status = dma_set_coherent_mask(&pdev->dev,
4279 DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004280 if (status) {
4281 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4282 goto free_netdev;
4283 }
4284 }
4285
Sathya Perlad6b6d982012-09-05 01:56:48 +00004286 status = pci_enable_pcie_error_reporting(pdev);
4287 if (status)
Ivan Vecera4ce1fd62013-07-25 16:10:55 +02004288 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00004289
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004290 status = be_ctrl_init(adapter);
4291 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004292 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004293
Sathya Perla2243e2e2009-11-22 22:02:03 +00004294 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004295 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004296 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004297 if (status)
4298 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004299 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004300
Sathya Perla39f1d942012-05-08 19:41:24 +00004301 if (be_reset_required(adapter)) {
4302 status = be_cmd_reset_function(adapter);
4303 if (status)
4304 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004305
Kalesh AP2d177be2013-04-28 22:22:29 +00004306 /* Wait for interrupts to quiesce after an FLR */
4307 msleep(100);
4308 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004309
4310 /* Allow interrupts for other ULPs running on NIC function */
4311 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004312
Kalesh AP2d177be2013-04-28 22:22:29 +00004313 /* tell fw we're ready to fire cmds */
4314 status = be_cmd_fw_init(adapter);
4315 if (status)
4316 goto ctrl_clean;
4317
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004318 status = be_stats_init(adapter);
4319 if (status)
4320 goto ctrl_clean;
4321
Sathya Perla39f1d942012-05-08 19:41:24 +00004322 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004323 if (status)
4324 goto stats_clean;
4325
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004326 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004327 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004328 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004329
Sathya Perla5fb379e2009-06-18 00:02:59 +00004330 status = be_setup(adapter);
4331 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004332 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004333
Sathya Perla3abcded2010-10-03 22:12:27 -07004334 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004335 status = register_netdev(netdev);
4336 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004337 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004338
Parav Pandit045508a2012-03-26 14:27:13 +00004339 be_roce_dev_add(adapter);
4340
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004341 schedule_delayed_work(&adapter->func_recovery_work,
4342 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004343
4344 be_cmd_query_port_name(adapter, &port_name);
4345
Sathya Perlad3791422012-09-28 04:39:44 +00004346 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4347 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004348
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004349 return 0;
4350
Sathya Perla5fb379e2009-06-18 00:02:59 +00004351unsetup:
4352 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004353stats_clean:
4354 be_stats_cleanup(adapter);
4355ctrl_clean:
4356 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004357free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004358 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004359 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004360rel_reg:
4361 pci_release_regions(pdev);
4362disable_dev:
4363 pci_disable_device(pdev);
4364do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004365 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004366 return status;
4367}
4368
/* PM suspend: arm WOL if enabled, stop the recovery worker, quiesce
 * the netdev, tear down HW resources and power down the PCI device.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4392
/* PM resume: re-enable the PCI device, re-initialize the FW mailbox,
 * rebuild HW resources, restart the recovery worker and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4429
Sathya Perla82456b02010-02-17 01:35:37 +00004430/*
4431 * An FLR will stop BE from DMAing any data.
4432 */
4433static void be_shutdown(struct pci_dev *pdev)
4434{
4435 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004436
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004437 if (!adapter)
4438 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004439
Sathya Perla0f4a6822011-03-21 20:49:28 +00004440 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004441 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004442
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004443 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004444
Ajit Khaparde57841862011-04-06 18:08:43 +00004445 be_cmd_reset_function(adapter);
4446
Sathya Perla82456b02010-02-17 01:35:37 +00004447 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004448}
4449
Sathya Perlacf588472010-02-14 21:22:01 +00004450static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4451 pci_channel_state_t state)
4452{
4453 struct be_adapter *adapter = pci_get_drvdata(pdev);
4454 struct net_device *netdev = adapter->netdev;
4455
4456 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4457
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004458 if (!adapter->eeh_error) {
4459 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004460
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004461 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004462
Sathya Perlacf588472010-02-14 21:22:01 +00004463 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004464 netif_device_detach(netdev);
4465 if (netif_running(netdev))
4466 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004467 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004468
4469 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004470 }
Sathya Perlacf588472010-02-14 21:22:01 +00004471
4472 if (state == pci_channel_io_perm_failure)
4473 return PCI_ERS_RESULT_DISCONNECT;
4474
4475 pci_disable_device(pdev);
4476
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004477 /* The error could cause the FW to trigger a flash debug dump.
4478 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004479 * can cause it not to recover; wait for it to finish.
4480 * Wait only for first function as it is needed only once per
4481 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004482 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004483 if (pdev->devfn == 0)
4484 ssleep(30);
4485
Sathya Perlacf588472010-02-14 21:22:01 +00004486 return PCI_ERS_RESULT_NEED_RESET;
4487}
4488
/* EEH callback: the slot has been reset.  Re-enable the device, wait
 * for FW readiness and clear recorded error state so be_eeh_resume()
 * can rebuild the function.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4515
/* EEH callback: recovery finished.  Reset and re-initialize the
 * function, reopen the netdev if it was running and restart the
 * recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4552
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004553static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004554 .error_detected = be_eeh_err_detected,
4555 .slot_reset = be_eeh_reset,
4556 .resume = be_eeh_resume,
4557};
4558
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004559static struct pci_driver be_driver = {
4560 .name = DRV_NAME,
4561 .id_table = be_dev_ids,
4562 .probe = be_probe,
4563 .remove = be_remove,
4564 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004565 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004566 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004567 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004568};
4569
4570static int __init be_init_module(void)
4571{
Joe Perches8e95a202009-12-03 07:58:21 +00004572 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4573 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004574 printk(KERN_WARNING DRV_NAME
4575 " : Module param rx_frag_size must be 2048/4096/8192."
4576 " Using 2048\n");
4577 rx_frag_size = 2048;
4578 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004579
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004580 return pci_register_driver(&be_driver);
4581}
4582module_init(be_init_module);
4583
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
4588module_exit(be_exit_module);